Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 432 matching lines...) | |
| 443 chunk->area_start_ = area_start; | 443 chunk->area_start_ = area_start; |
| 444 chunk->area_end_ = area_end; | 444 chunk->area_end_ = area_end; |
| 445 chunk->flags_ = 0; | 445 chunk->flags_ = 0; |
| 446 chunk->set_owner(owner); | 446 chunk->set_owner(owner); |
| 447 chunk->InitializeReservedMemory(); | 447 chunk->InitializeReservedMemory(); |
| 448 chunk->slots_buffer_ = NULL; | 448 chunk->slots_buffer_ = NULL; |
| 449 chunk->skip_list_ = NULL; | 449 chunk->skip_list_ = NULL; |
| 450 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; | 450 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; |
| 451 chunk->progress_bar_ = 0; | 451 chunk->progress_bar_ = 0; |
| 452 chunk->high_water_mark_ = static_cast<int>(area_start - base); | 452 chunk->high_water_mark_ = static_cast<int>(area_start - base); |
| | 453 chunk->parallel_sweeping_ = 0; |
| 453 chunk->ResetLiveBytes(); | 454 chunk->ResetLiveBytes(); |
| 454 Bitmap::Clear(chunk); | 455 Bitmap::Clear(chunk); |
| 455 chunk->initialize_scan_on_scavenge(false); | 456 chunk->initialize_scan_on_scavenge(false); |
| 456 chunk->SetFlag(WAS_SWEPT_PRECISELY); | 457 chunk->SetFlag(WAS_SWEPT_PRECISELY); |
| 457 | 458 |
| 458 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); | 459 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); |
| 459 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); | 460 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); |
| 460 | 461 |
| 461 if (executable == EXECUTABLE) { | 462 if (executable == EXECUTABLE) { |
| 462 chunk->SetFlag(IS_EXECUTABLE); | 463 chunk->SetFlag(IS_EXECUTABLE); |
| (...skipping 1460 matching lines...) | |
| 1923 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); | 1924 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); |
| 1924 Memory::Address_at(address() + kNextOffset) = | 1925 Memory::Address_at(address() + kNextOffset) = |
| 1925 reinterpret_cast<Address>(next); | 1926 reinterpret_cast<Address>(next); |
| 1926 } else { | 1927 } else { |
| 1927 Memory::Address_at(address() + kPointerSize) = | 1928 Memory::Address_at(address() + kPointerSize) = |
| 1928 reinterpret_cast<Address>(next); | 1929 reinterpret_cast<Address>(next); |
| 1929 } | 1930 } |
| 1930 } | 1931 } |
| 1931 | 1932 |
| 1932 | 1933 |
| | 1934 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) { |
| | 1935 intptr_t free_bytes = 0; |
| | 1936 if (category->top_ != NULL) { |
| | 1937 ASSERT(category->end_ != NULL); |
| | 1938 // This is safe (not going to deadlock) since Concatenate operations |
| | 1939 // are never performed on the same free lists at the same time in |
| | 1940 // reverse order. |
| | 1941 ScopedLock lock_target(mutex_); |
| | 1942 ScopedLock lock_source(category->mutex()); |
| | 1943 free_bytes = category->available(); |
| | 1944 if (end_ == NULL) { |
| | 1945 end_ = category->end(); |
| | 1946 } else { |
| | 1947 category->end()->set_next(top_); |
| | 1948 } |
| | 1949 top_ = category->top(); |
| | 1950 available_ += category->available(); |
| | 1951 category->Reset(); |
| | 1952 } |
| | 1953 return free_bytes; |
| | 1954 } |
| | 1955 |
| | 1956 |
| 1933 void FreeListCategory::Reset() { | 1957 void FreeListCategory::Reset() { |
| 1934 top_ = NULL; | 1958 top_ = NULL; |
| 1935 end_ = NULL; | 1959 end_ = NULL; |
| 1936 available_ = 0; | 1960 available_ = 0; |
| 1937 } | 1961 } |
| 1938 | 1962 |
| 1939 | 1963 |
| 1940 intptr_t FreeListCategory::CountFreeListItemsInList(Page* p) { | 1964 intptr_t FreeListCategory::CountFreeListItemsInList(Page* p) { |
| 1941 int sum = 0; | 1965 int sum = 0; |
| 1942 FreeListNode* n = top_; | 1966 FreeListNode* n = top_; |
| (...skipping 78 matching lines...) | |
| 2021 } | 2045 } |
| 2022 } | 2046 } |
| 2023 | 2047 |
| 2024 | 2048 |
| 2025 FreeList::FreeList(PagedSpace* owner) | 2049 FreeList::FreeList(PagedSpace* owner) |
| 2026 : owner_(owner), heap_(owner->heap()) { | 2050 : owner_(owner), heap_(owner->heap()) { |
| 2027 Reset(); | 2051 Reset(); |
| 2028 } | 2052 } |
| 2029 | 2053 |
| 2030 | 2054 |
| | 2055 intptr_t FreeList::Concatenate(FreeList* free_list) { |
| | 2056 intptr_t free_bytes = 0; |
| | 2057 free_bytes += small_list_.Concatenate(free_list->small_list()); |
| | 2058 free_bytes += medium_list_.Concatenate(free_list->medium_list()); |
| | 2059 free_bytes += large_list_.Concatenate(free_list->large_list()); |
| | 2060 free_bytes += huge_list_.Concatenate(free_list->huge_list()); |
| | 2061 return free_bytes; |
| | 2062 } |
| | 2063 |
| | 2064 |
| 2031 void FreeList::Reset() { | 2065 void FreeList::Reset() { |
| 2032 small_list_.Reset(); | 2066 small_list_.Reset(); |
| 2033 medium_list_.Reset(); | 2067 medium_list_.Reset(); |
| 2034 large_list_.Reset(); | 2068 large_list_.Reset(); |
| 2035 huge_list_.Reset(); | 2069 huge_list_.Reset(); |
| 2036 } | 2070 } |
| 2037 | 2071 |
| 2038 | 2072 |
| 2039 int FreeList::Free(Address start, int size_in_bytes) { | 2073 int FreeList::Free(Address start, int size_in_bytes) { |
| 2040 if (size_in_bytes == 0) return 0; | 2074 if (size_in_bytes == 0) return 0; |
| (...skipping 344 matching lines...) | |
| 2385 intptr_t freed_bytes = 0; | 2419 intptr_t freed_bytes = 0; |
| 2386 Page* p = first_unswept_page_; | 2420 Page* p = first_unswept_page_; |
| 2387 do { | 2421 do { |
| 2388 Page* next_page = p->next_page(); | 2422 Page* next_page = p->next_page(); |
| 2389 if (ShouldBeSweptLazily(p)) { | 2423 if (ShouldBeSweptLazily(p)) { |
| 2390 if (FLAG_gc_verbose) { | 2424 if (FLAG_gc_verbose) { |
| 2391 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", | 2425 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", |
| 2392 reinterpret_cast<intptr_t>(p)); | 2426 reinterpret_cast<intptr_t>(p)); |
| 2393 } | 2427 } |
| 2394 DecreaseUnsweptFreeBytes(p); | 2428 DecreaseUnsweptFreeBytes(p); |
| 2395 freed_bytes += MarkCompactCollector::SweepConservatively(this, p); | 2429 freed_bytes += MarkCompactCollector::SweepConservatively( |
| | 2430 this, this->free_list(), p); |
| 2396 } | 2431 } |
| 2397 p = next_page; | 2432 p = next_page; |
| 2398 } while (p != anchor() && freed_bytes < bytes_to_sweep); | 2433 } while (p != anchor() && freed_bytes < bytes_to_sweep); |
| 2399 | 2434 |
| 2400 if (p == anchor()) { | 2435 if (p == anchor()) { |
| 2401 first_unswept_page_ = Page::FromAddress(NULL); | 2436 first_unswept_page_ = Page::FromAddress(NULL); |
| 2402 } else { | 2437 } else { |
| 2403 first_unswept_page_ = p; | 2438 first_unswept_page_ = p; |
| 2404 } | 2439 } |
| 2405 | 2440 |
| (...skipping 13 matching lines...) | |
| 2419 heap()->CreateFillerObjectAt(allocation_info_.top, remaining); | 2454 heap()->CreateFillerObjectAt(allocation_info_.top, remaining); |
| 2420 | 2455 |
| 2421 allocation_info_.top = NULL; | 2456 allocation_info_.top = NULL; |
| 2422 allocation_info_.limit = NULL; | 2457 allocation_info_.limit = NULL; |
| 2423 } | 2458 } |
| 2424 } | 2459 } |
| 2425 | 2460 |
| 2426 | 2461 |
| 2427 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { | 2462 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { |
| 2428 // Allocation in this space has failed. | 2463 // Allocation in this space has failed. |
| 2429 | 2464 if (heap()->IsConcurrentSweepingActivated()) { |
Michael Starzinger (2013/01/24 17:15:54):
The SlowAllocateRaw() function is getting too complex.

Hannes Payer (out of office) (2013/01/25 10:46:49):
Done.
| 2430 // If there are unswept pages advance lazy sweeper a bounded number of times | 2465 heap()->StealMemoryFromSweeperThreads(this); |
| 2431 // until we find a size_in_bytes contiguous piece of memory | |
| 2432 const int kMaxSweepingTries = 5; | |
| 2433 bool sweeping_complete = false; | |
| 2434 | |
| 2435 for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) { | |
| 2436 sweeping_complete = AdvanceSweeper(size_in_bytes); | |
| 2437 | |
| 2438 // Retry the free list allocation. | |
| 2439 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2466 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2440 if (object != NULL) return object; | 2467 if (object != NULL) return object; |
| | 2468 |
| | 2469 if (heap()->IsConcurrentSweepingPending()) { |
| | 2470 heap()->WaitUntilParallelSweepingCompleted(); |
| | 2471 } |
| | 2472 |
| | 2473 heap()->StealMemoryFromSweeperThreads(this); |
| | 2474 |
| | 2475 object = free_list_.Allocate(size_in_bytes); |
| | 2476 if (object != NULL) return object; |
| | 2477 } else { |
| | 2478 // If there are unswept pages advance lazy sweeper a bounded number of |
| | 2479 // times until we find a size_in_bytes contiguous piece of memory |
| | 2480 const int kMaxSweepingTries = 5; |
| | 2481 bool sweeping_complete = false; |
| | 2482 |
| | 2483 for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) { |
| | 2484 sweeping_complete = AdvanceSweeper(size_in_bytes); |
| | 2485 |
| | 2486 // Retry the free list allocation. |
| | 2487 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| | 2488 if (object != NULL) return object; |
| | 2489 } |
| | 2490 |
| | 2491 // Last ditch, sweep all the remaining pages to try to find space. This may |
| | 2492 // cause a pause. |
| | 2493 if (!IsSweepingComplete()) { |
| | 2494 AdvanceSweeper(kMaxInt); |
| | 2495 |
| | 2496 // Retry the free list allocation. |
| | 2497 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| | 2498 if (object != NULL) return object; |
| | 2499 } |
| 2441 } | 2500 } |
| 2442 | 2501 |
| 2443 // Free list allocation failed and there is no next page. Fail if we have | 2502 // Free list allocation failed and there is no next page. Fail if we have |
| 2444 // hit the old generation size limit that should cause a garbage | 2503 // hit the old generation size limit that should cause a garbage |
| 2445 // collection. | 2504 // collection. |
| 2446 if (!heap()->always_allocate() && | 2505 if (!heap()->always_allocate() && |
| 2447 heap()->OldGenerationAllocationLimitReached()) { | 2506 heap()->OldGenerationAllocationLimitReached()) { |
| 2448 return NULL; | 2507 return NULL; |
| 2449 } | 2508 } |
| 2450 | 2509 |
| 2451 // Try to expand the space and allocate in the new next page. | 2510 // Try to expand the space and allocate in the new next page. |
| 2452 if (Expand()) { | 2511 if (Expand()) { |
| 2453 return free_list_.Allocate(size_in_bytes); | 2512 return free_list_.Allocate(size_in_bytes); |
| 2454 } | 2513 } |
| 2455 | 2514 |
| 2456 // Last ditch, sweep all the remaining pages to try to find space. This may | |
| 2457 // cause a pause. | |
| 2458 if (!IsSweepingComplete()) { | |
| 2459 AdvanceSweeper(kMaxInt); | |
| 2460 | |
| 2461 // Retry the free list allocation. | |
| 2462 HeapObject* object = free_list_.Allocate(size_in_bytes); | |
| 2463 if (object != NULL) return object; | |
| 2464 } | |
| 2465 | |
| 2466 // Finally, fail. | 2515 // Finally, fail. |
| 2467 return NULL; | 2516 return NULL; |
| 2468 } | 2517 } |
| 2469 | 2518 |
| 2470 | 2519 |
| 2471 #ifdef DEBUG | 2520 #ifdef DEBUG |
| 2472 void PagedSpace::ReportCodeStatistics() { | 2521 void PagedSpace::ReportCodeStatistics() { |
| 2473 Isolate* isolate = Isolate::Current(); | 2522 Isolate* isolate = Isolate::Current(); |
| 2474 CommentStatistic* comments_statistics = | 2523 CommentStatistic* comments_statistics = |
| 2475 isolate->paged_space_comments_statistics(); | 2524 isolate->paged_space_comments_statistics(); |
| (...skipping 510 matching lines...) | |
| 2986 object->ShortPrint(); | 3035 object->ShortPrint(); |
| 2987 PrintF("\n"); | 3036 PrintF("\n"); |
| 2988 } | 3037 } |
| 2989 printf(" --------------------------------------\n"); | 3038 printf(" --------------------------------------\n"); |
| 2990 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3039 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 2991 } | 3040 } |
| 2992 | 3041 |
| 2993 #endif // DEBUG | 3042 #endif // DEBUG |
| 2994 | 3043 |
| 2995 } } // namespace v8::internal | 3044 } } // namespace v8::internal |
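
The FreeListCategory::Concatenate hunk (new lines 1934-1954) hinges on a lock-ordering argument: the target category's mutex is always acquired before the source's, and, per the in-code comment, no two threads ever concatenate the same pair of free lists in opposite directions at the same time, so the two locks cannot deadlock. Below is a minimal standalone sketch of that discipline; it uses std::mutex and simplified types in place of V8's ScopedLock and FreeListNode, so the names here are illustrative, not V8's actual API.

```cpp
#include <cstddef>
#include <mutex>

struct Node {
  Node* next = nullptr;
};

class Category {
 public:
  // Moves every node from |source| onto the front of this list and returns
  // the number of bytes transferred. The target's mutex is acquired before
  // the source's, mirroring the patch; this cannot deadlock as long as no
  // two threads concatenate the same pair of categories in opposite
  // directions concurrently.
  size_t Concatenate(Category* source) {
    size_t free_bytes = 0;
    if (source->top_ != nullptr) {
      std::lock_guard<std::mutex> lock_target(mutex_);
      std::lock_guard<std::mutex> lock_source(source->mutex_);
      free_bytes = source->available_;
      if (end_ == nullptr) {
        end_ = source->end_;        // This list was empty; adopt source's tail.
      } else {
        source->end_->next = top_;  // Splice our old head after source's tail.
      }
      top_ = source->top_;
      available_ += source->available_;
      source->top_ = nullptr;       // Equivalent of category->Reset().
      source->end_ = nullptr;
      source->available_ = 0;
    }
    return free_bytes;
  }

 private:
  std::mutex mutex_;
  Node* top_ = nullptr;
  Node* end_ = nullptr;
  size_t available_ = 0;
};
```

Note that the safety argument is a caller-side guarantee quoted in the comment, not something the locks themselves enforce. FreeList::Concatenate (new lines 2055-2064) then just applies this per-category operation to each of the four size classes (small, medium, large, huge) and sums the bytes reclaimed.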
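The inline review comment above flags SlowAllocateRaw() as getting too complex (answered with "Done."). Stripped of the surrounding class, the control flow this patch gives the slow path looks roughly as follows. This is a condensed restatement, not the real implementation: the helper names (IsConcurrentSweepingActivated, StealMemoryFromSweeperThreads, AdvanceSweeper, and so on) come from the diff itself, while the stub bodies are placeholders that let the sketch compile on its own.

```cpp
#include <climits>

struct HeapObject {};

struct Heap {
  bool IsConcurrentSweepingActivated() { return false; }        // stub
  bool IsConcurrentSweepingPending() { return false; }          // stub
  void WaitUntilParallelSweepingCompleted() {}                  // stub
  bool always_allocate() { return false; }                      // stub
  bool OldGenerationAllocationLimitReached() { return false; }  // stub
};

class PagedSpace {
 public:
  HeapObject* SlowAllocateRaw(int size_in_bytes) {
    if (heap_.IsConcurrentSweepingActivated()) {
      // Concurrent path: take over memory the sweeper threads have already
      // freed, retry the allocation, and only block on the sweepers if that
      // first retry fails.
      StealMemoryFromSweeperThreads();
      if (HeapObject* object = Allocate(size_in_bytes)) return object;
      if (heap_.IsConcurrentSweepingPending()) {
        heap_.WaitUntilParallelSweepingCompleted();
      }
      StealMemoryFromSweeperThreads();
      if (HeapObject* object = Allocate(size_in_bytes)) return object;
    } else {
      // Lazy path (the pre-existing behavior): advance the main-thread
      // sweeper a bounded number of times, then sweep everything that is
      // left as a last resort.
      const int kMaxSweepingTries = 5;
      bool sweeping_complete = false;
      for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
        sweeping_complete = AdvanceSweeper(size_in_bytes);
        if (HeapObject* object = Allocate(size_in_bytes)) return object;
      }
      if (!IsSweepingComplete()) {
        AdvanceSweeper(INT_MAX);  // kMaxInt in the original.
        if (HeapObject* object = Allocate(size_in_bytes)) return object;
      }
    }
    // Shared tail for both paths: respect the old-generation limit, then
    // try to grow the space by one page.
    if (!heap_.always_allocate() &&
        heap_.OldGenerationAllocationLimitReached()) {
      return nullptr;
    }
    if (Expand()) return Allocate(size_in_bytes);
    return nullptr;
  }

 private:
  HeapObject* Allocate(int) { return nullptr; }  // stands in for free_list_.Allocate
  void StealMemoryFromSweeperThreads() {}        // stub
  bool AdvanceSweeper(int) { return true; }      // stub
  bool IsSweepingComplete() { return true; }     // stub
  bool Expand() { return false; }                // stub
  Heap heap_;
};
```

Note the asymmetry between the branches: the concurrent path blocks at most once, on pending sweeper threads, while the lazy path bounds its incremental work with kMaxSweepingTries before falling back to a full sweep.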