OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 6900 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6911 if (chunk->owner()->identity() == LO_SPACE) { | 6911 if (chunk->owner()->identity() == LO_SPACE) { |
6912 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress. | 6912 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress. |
6913 // If FromAnyPointerAddress encounters a slot that belongs to a large | 6913 // If FromAnyPointerAddress encounters a slot that belongs to a large |
6914 // chunk queued for deletion it will fail to find the chunk because | 6914 // chunk queued for deletion it will fail to find the chunk because |
6915 // it tries to perform a search in the list of pages owned by the large | 6915 // it tries to perform a search in the list of pages owned by the large |
6916 // object space and queued chunks were detached from that list. | 6916 // object space and queued chunks were detached from that list. |
6917 // To work around this we split large chunk into normal kPageSize aligned | 6917 // To work around this we split large chunk into normal kPageSize aligned |
6918 // pieces and initialize size, owner and flags field of every piece. | 6918 // pieces and initialize size, owner and flags field of every piece. |
6919 // If FromAnyPointerAddress encounters a slot that belongs to one of | 6919 // If FromAnyPointerAddress encounters a slot that belongs to one of |
6920 // these smaller pieces it will treat it as a slot on a normal Page. | 6920 // these smaller pieces it will treat it as a slot on a normal Page. |
| 6921 Address chunk_end = chunk->address() + chunk->size(); |
6921 MemoryChunk* inner = MemoryChunk::FromAddress( | 6922 MemoryChunk* inner = MemoryChunk::FromAddress( |
6922 chunk->address() + Page::kPageSize); | 6923 chunk->address() + Page::kPageSize); |
6923 MemoryChunk* inner_last = MemoryChunk::FromAddress( | 6924 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1); |
6924 chunk->address() + chunk->size() - 1); | |
6925 while (inner <= inner_last) { | 6925 while (inner <= inner_last) { |
6926 // Size of a large chunk is always a multiple of | 6926 // Size of a large chunk is always a multiple of |
6927 // OS::AllocateAlignment() so there is always | 6927 // OS::AllocateAlignment() so there is always |
6928 // enough space for a fake MemoryChunk header. | 6928 // enough space for a fake MemoryChunk header. |
| 6929 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end); |
| 6930 // Guard against overflow. |
| 6931 if (area_end < inner->address()) area_end = chunk_end; |
| 6932 inner->SetArea(inner->address(), area_end); |
6929 inner->set_size(Page::kPageSize); | 6933 inner->set_size(Page::kPageSize); |
6930 inner->set_owner(lo_space()); | 6934 inner->set_owner(lo_space()); |
6931 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); | 6935 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); |
6932 inner = MemoryChunk::FromAddress( | 6936 inner = MemoryChunk::FromAddress( |
6933 inner->address() + Page::kPageSize); | 6937 inner->address() + Page::kPageSize); |
6934 } | 6938 } |
6935 } | 6939 } |
6936 } | 6940 } |
6937 isolate_->heap()->store_buffer()->Compact(); | 6941 isolate_->heap()->store_buffer()->Compact(); |
6938 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED); | 6942 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED); |
6939 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { | 6943 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { |
6940 next = chunk->next_chunk(); | 6944 next = chunk->next_chunk(); |
6941 isolate_->memory_allocator()->Free(chunk); | 6945 isolate_->memory_allocator()->Free(chunk); |
6942 } | 6946 } |
6943 chunks_queued_for_free_ = NULL; | 6947 chunks_queued_for_free_ = NULL; |
6944 } | 6948 } |
6945 | 6949 |
6946 } } // namespace v8::internal | 6950 } } // namespace v8::internal |
OLD | NEW |