OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 2502 matching lines...)
2513 if (current_ == NULL) return NULL; | 2513 if (current_ == NULL) return NULL; |
2514 | 2514 |
2515 HeapObject* object = current_->GetObject(); | 2515 HeapObject* object = current_->GetObject(); |
2516 current_ = current_->next_page(); | 2516 current_ = current_->next_page(); |
2517 return object; | 2517 return object; |
2518 } | 2518 } |
2519 | 2519 |
2520 | 2520 |
2521 // ----------------------------------------------------------------------------- | 2521 // ----------------------------------------------------------------------------- |
2522 // LargeObjectSpace | 2522 // LargeObjectSpace |
| 2523 static bool ComparePointers(void* key1, void* key2) { |
| 2524 return key1 == key2; |
| 2525 } |
| 2526 |
2523 | 2527 |
2524 LargeObjectSpace::LargeObjectSpace(Heap* heap, | 2528 LargeObjectSpace::LargeObjectSpace(Heap* heap, |
2525 intptr_t max_capacity, | 2529 intptr_t max_capacity, |
2526 AllocationSpace id) | 2530 AllocationSpace id) |
2527 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis | 2531 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis |
2528 max_capacity_(max_capacity), | 2532 max_capacity_(max_capacity), |
2529 first_page_(NULL), | 2533 first_page_(NULL), |
2530 size_(0), | 2534 size_(0), |
2531 page_count_(0), | 2535 page_count_(0), |
2532 objects_size_(0) {} | 2536 objects_size_(0), |
| 2537 chunk_map_(ComparePointers, 1024) {} |
2533 | 2538 |
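A note on the new chunk_map_ member (a sketch of the idea, not part of the patch): it maps every MemoryChunk::kAlignment-sized address slot covered by a large page to that page, so lookups no longer have to walk the page list. The keys are already well-spread integers, so each key can double as its own hash value, and equality is plain pointer comparison, which is all ComparePointers needs to provide. Roughly, in terms of a standard container (std::unordered_map standing in for the internal HashMap; names are illustrative):

#include <cstddef>
#include <cstdint>
#include <unordered_map>

// The aligned-address key is already a small, well-distributed integer, so an
// identity hash is sufficient; equality is a plain comparison, the same role
// ComparePointers plays for the internal HashMap in the patch.
struct IdentityHash {
  std::size_t operator()(uintptr_t key) const { return static_cast<std::size_t>(key); }
};
using ChunkMap = std::unordered_map<uintptr_t, void*, IdentityHash>;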
2534 | 2539 |
2535 bool LargeObjectSpace::SetUp() { | 2540 bool LargeObjectSpace::SetUp() { |
2536 first_page_ = NULL; | 2541 first_page_ = NULL; |
2537 size_ = 0; | 2542 size_ = 0; |
2538 page_count_ = 0; | 2543 page_count_ = 0; |
2539 objects_size_ = 0; | 2544 objects_size_ = 0; |
| 2545 chunk_map_.Clear(); |
2540 return true; | 2546 return true; |
2541 } | 2547 } |
2542 | 2548 |
2543 | 2549 |
2544 void LargeObjectSpace::TearDown() { | 2550 void LargeObjectSpace::TearDown() { |
2545 while (first_page_ != NULL) { | 2551 while (first_page_ != NULL) { |
2546 LargePage* page = first_page_; | 2552 LargePage* page = first_page_; |
2547 first_page_ = first_page_->next_page(); | 2553 first_page_ = first_page_->next_page(); |
2548 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address())); | 2554 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address())); |
2549 | 2555 |
(...skipping 23 matching lines...)
2573 AllocateLargePage(object_size, executable, this); | 2579 AllocateLargePage(object_size, executable, this); |
2574 if (page == NULL) return Failure::RetryAfterGC(identity()); | 2580 if (page == NULL) return Failure::RetryAfterGC(identity()); |
2575 ASSERT(page->area_size() >= object_size); | 2581 ASSERT(page->area_size() >= object_size); |
2576 | 2582 |
2577 size_ += static_cast<int>(page->size()); | 2583 size_ += static_cast<int>(page->size()); |
2578 objects_size_ += object_size; | 2584 objects_size_ += object_size; |
2579 page_count_++; | 2585 page_count_++; |
2580 page->set_next_page(first_page_); | 2586 page->set_next_page(first_page_); |
2581 first_page_ = page; | 2587 first_page_ = page; |
2582 | 2588 |
| 2589 // Register all MemoryChunk::kAlignment-aligned chunks covered by |
| 2590 // this large page in the chunk map. |
| 2591 uintptr_t base = reinterpret_cast<uintptr_t>(page)/MemoryChunk::kAlignment; |
| 2592 uintptr_t limit = base + (page->size()-1)/MemoryChunk::kAlignment; |
| 2593 for (uintptr_t key = base; key <= limit; key++) { |
| 2594 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), |
| 2595 key, true); |
| 2596 ASSERT(entry != NULL); |
| 2597 entry->value = page; |
| 2598 } |
| 2599 |
2583 HeapObject* object = page->GetObject(); | 2600 HeapObject* object = page->GetObject(); |
2584 | 2601 |
2585 #ifdef DEBUG | 2602 #ifdef DEBUG |
2586 // Make the object consistent so the heap can be verified in OldSpaceStep. | 2603 // Make the object consistent so the heap can be verified in OldSpaceStep. |
2587 reinterpret_cast<Object**>(object->address())[0] = | 2604 reinterpret_cast<Object**>(object->address())[0] = |
2588 heap()->fixed_array_map(); | 2605 heap()->fixed_array_map(); |
2589 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); | 2606 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); |
2590 #endif | 2607 #endif |
2591 | 2608 |
2592 heap()->incremental_marking()->OldSpaceStep(object_size); | 2609 heap()->incremental_marking()->OldSpaceStep(object_size); |
2593 return object; | 2610 return object; |
2594 } | 2611 } |
2595 | 2612 |
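To sanity-check the inclusive base/limit arithmetic above: MemoryChunk allocations, large pages included, are kAlignment-aligned, so base * kAlignment is the page start and limit is the slot holding the page's last byte. A small worked example with assumed numbers (1 MB alignment, invented sizes, not from the patch):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kAlignment = uintptr_t(1) << 20;        // pretend MemoryChunk::kAlignment is 1 MB
  const uintptr_t page_start = 5 * kAlignment;            // chunks are kAlignment-aligned
  const uintptr_t page_size = 3 * kAlignment + 4096;      // a large object spilling into a 4th slot

  uintptr_t base = page_start / kAlignment;               // 5
  uintptr_t limit = base + (page_size - 1) / kAlignment;  // 5 + 3 = 8
  assert(base == 5 && limit == 8);                        // slots 5..8 all get a map entry

  // Any interior address therefore lands on a registered slot key.
  uintptr_t a = page_start + 2 * kAlignment + 100;
  assert(a / kAlignment >= base && a / kAlignment <= limit);
  return 0;
}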
2596 | 2613 |
2597 // GC support | 2614 // GC support |
2598 MaybeObject* LargeObjectSpace::FindObject(Address a) { | 2615 MaybeObject* LargeObjectSpace::FindObject(Address a) { |
2599 for (LargePage* page = first_page_; | 2616 LargePage* page = FindPage(a); |
2600 page != NULL; | 2617 if (page != NULL) { |
2601 page = page->next_page()) { | 2618 return page->GetObject(); |
2602 Address page_address = page->address(); | |
2603 if (page_address <= a && a < page_address + page->size()) { | |
2604 return page->GetObject(); | |
2605 } | |
2606 } | 2619 } |
2607 return Failure::Exception(); | 2620 return Failure::Exception(); |
2608 } | 2621 } |
2609 | 2622 |
2610 | 2623 |
2611 LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) { | 2624 LargePage* LargeObjectSpace::FindPage(Address a) { |
2612 // TODO(853): Change this implementation to only find executable | 2625 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment; |
2613 // chunks and use some kind of hash-based approach to speed it up. | 2626 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key), |
2614 for (LargePage* chunk = first_page_; | 2627 key, false); |
2615 chunk != NULL; | 2628 if (e != NULL) { |
2616 chunk = chunk->next_page()) { | 2629 ASSERT(e->value != NULL); |
2617 Address chunk_address = chunk->address(); | 2630 LargePage* page = reinterpret_cast<LargePage*>(e->value); |
2618 if (chunk_address <= pc && pc < chunk_address + chunk->size()) { | 2631 ASSERT(page->is_valid()); |
2619 return chunk; | 2632 if (page->Contains(a)) { |
| 2633 return page; |
2620 } | 2634 } |
2621 } | 2635 } |
2622 return NULL; | 2636 return NULL; |
2623 } | 2637 } |
2624 | 2638 |
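The lookup side, sketched with the same kind of stand-ins (std::unordered_map instead of the internal HashMap; struct and names invented for illustration): one division turns an arbitrary interior address into a registered slot key, and the final containment test is a cheap guard before trusting the entry, mirroring the page->Contains(a) check above.

#include <cstdint>
#include <unordered_map>

struct PageStub { uintptr_t start; uintptr_t size; };    // illustrative stand-in for LargePage
static const uintptr_t kAlignment = uintptr_t(1) << 20;  // assumed 1 MB alignment
static std::unordered_map<uintptr_t, PageStub*> chunk_map;

PageStub* FindPageStub(uintptr_t addr) {
  auto it = chunk_map.find(addr / kAlignment);           // O(1) instead of walking the page list
  if (it == chunk_map.end()) return nullptr;             // no large page covers addr
  PageStub* page = it->second;
  // Re-check the bounds before trusting the entry, as FindPage does with Contains().
  if (addr >= page->start && addr < page->start + page->size) return page;
  return nullptr;
}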
2625 | 2639 |
2626 void LargeObjectSpace::FreeUnmarkedObjects() { | 2640 void LargeObjectSpace::FreeUnmarkedObjects() { |
2627 LargePage* previous = NULL; | 2641 LargePage* previous = NULL; |
2628 LargePage* current = first_page_; | 2642 LargePage* current = first_page_; |
2629 while (current != NULL) { | 2643 while (current != NULL) { |
(...skipping 17 matching lines...)
2647 previous->set_next_page(current); | 2661 previous->set_next_page(current); |
2648 } | 2662 } |
2649 | 2663 |
2650 // Free the chunk. | 2664 // Free the chunk. |
2651 heap()->mark_compact_collector()->ReportDeleteIfNeeded( | 2665 heap()->mark_compact_collector()->ReportDeleteIfNeeded( |
2652 object, heap()->isolate()); | 2666 object, heap()->isolate()); |
2653 size_ -= static_cast<int>(page->size()); | 2667 size_ -= static_cast<int>(page->size()); |
2654 objects_size_ -= object->Size(); | 2668 objects_size_ -= object->Size(); |
2655 page_count_--; | 2669 page_count_--; |
2656 | 2670 |
| 2671 // Remove entries belonging to this page. |
| 2672 // Use variable alignment to help pass length check (<= 80 characters) |
| 2673 // of single line in tools/presubmit.py. |
| 2674 const intptr_t alignment = MemoryChunk::kAlignment; |
| 2675 uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment; |
| 2676 uintptr_t limit = base + (page->size()-1)/alignment; |
| 2677 for (uintptr_t key = base; key <= limit; key++) { |
| 2678 chunk_map_.Remove(reinterpret_cast<void*>(key), key); |
| 2679 } |
| 2680 |
2657 if (is_pointer_object) { | 2681 if (is_pointer_object) { |
2658 heap()->QueueMemoryChunkForFree(page); | 2682 heap()->QueueMemoryChunkForFree(page); |
2659 } else { | 2683 } else { |
2660 heap()->isolate()->memory_allocator()->Free(page); | 2684 heap()->isolate()->memory_allocator()->Free(page); |
2661 } | 2685 } |
2662 } | 2686 } |
2663 } | 2687 } |
2664 heap()->FreeQueuedChunks(); | 2688 heap()->FreeQueuedChunks(); |
2665 } | 2689 } |
2666 | 2690 |
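A consistency note on the removal loop above (worked with assumed values, not from the patch): unregistering must walk exactly the same inclusive key range as registration in AllocateRaw, otherwise stale entries would keep pointing at a freed page. A minimal check of that symmetry:

#include <cassert>
#include <cstdint>
#include <unordered_map>

int main() {
  const uintptr_t kAlignment = uintptr_t(1) << 20;   // assumed 1 MB alignment
  const uintptr_t page_start = 7 * kAlignment;       // chunks are kAlignment-aligned
  const uintptr_t page_size = 2 * kAlignment + 512;
  std::unordered_map<uintptr_t, const void*> chunk_map;

  uintptr_t base = page_start / kAlignment;
  uintptr_t limit = base + (page_size - 1) / kAlignment;
  for (uintptr_t key = base; key <= limit; key++) chunk_map[key] = &chunk_map;  // register
  for (uintptr_t key = base; key <= limit; key++) chunk_map.erase(key);         // unregister
  assert(chunk_map.empty());   // no stale entries, so a later lookup misses cleanly
  return 0;
}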
(...skipping 115 matching lines...)
2782 object->ShortPrint(); | 2806 object->ShortPrint(); |
2783 PrintF("\n"); | 2807 PrintF("\n"); |
2784 } | 2808 } |
2785 printf(" --------------------------------------\n"); | 2809 printf(" --------------------------------------\n"); |
2786 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 2810 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
2787 } | 2811 } |
2788 | 2812 |
2789 #endif // DEBUG | 2813 #endif // DEBUG |
2790 | 2814 |
2791 } } // namespace v8::internal | 2815 } } // namespace v8::internal |