Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index a0c8f2cba168c6bc06bc0fe04879e0e4cd8d8c54..2e07f3eef099b5de3d7fc3bf6a39bfeaf55ada8c 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -327,7 +327,7 @@ void MemoryAllocator::FreeMemory(Address base,
     ASSERT(size_executable_ >= size);
     size_executable_ -= size;
   }
-  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
+  if (isolate_->code_range()->contains(base)) {
     ASSERT(executable == EXECUTABLE);
     isolate_->code_range()->FreeRawMemory(base, size);
   } else {
@@ -555,9 +555,58 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
       Increment(static_cast<int>(chunk_size));
   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
-  if (owner != NULL) {
-    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
-    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
+
+  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
[Inline comment] Michael Starzinger, 2012/06/25 15:49:19:
The deoptimizer (i.e. Deoptimizer::CreateCode) use…
+  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
+
+  switch (owner->identity()) {
+    case OLD_POINTER_SPACE:
+      isolate_->counters()->old_pointer_space_bytes_allocated()->
+          Increment(static_cast<int>(chunk_size));
+      isolate_->counters()->old_pointer_space_bytes_used()->
+          Set(static_cast<int>(owner->SizeOfObjects()));
+      break;
+
+    case OLD_DATA_SPACE:
+      isolate_->counters()->old_data_space_bytes_allocated()->
+          Increment(static_cast<int>(chunk_size));
+      isolate_->counters()->old_data_space_bytes_used()->
+          Set(static_cast<int>(owner->SizeOfObjects()));
+      break;
+
+    case CODE_SPACE:
+      isolate_->counters()->code_space_bytes_allocated()->
+          Increment(static_cast<int>(chunk_size));
+      isolate_->counters()->code_space_bytes_used()->
+          Set(static_cast<int>(owner->SizeOfObjects()));
+      break;
+
+    case MAP_SPACE:
+      isolate_->counters()->map_space_bytes_allocated()->
+          Increment(static_cast<int>(chunk_size));
+      isolate_->counters()->map_space_bytes_used()->
+          Set(static_cast<int>(owner->SizeOfObjects()));
+      break;
+
+    case CELL_SPACE:
+      isolate_->counters()->cell_space_bytes_allocated()->
+          Increment(static_cast<int>(chunk_size));
+      isolate_->counters()->cell_space_bytes_used()->
+          Set(static_cast<int>(owner->SizeOfObjects()));
+      break;
+
+    case LO_SPACE:
+      isolate_->counters()->lo_space_bytes_allocated()->
+          Increment(static_cast<int>(chunk_size));
+      isolate_->counters()->lo_space_bytes_used()->
+          Set(static_cast<int>(owner->SizeOfObjects()));
+      break;
+
+    case NEW_SPACE:
+      // NEW_SPACE is allocated using ReserveAlignedMemory and CommitBlock,
+      // so its counters are updated there rather than here.
+
+    default:
+      UNREACHABLE();
   }
   MemoryChunk* result = MemoryChunk::Initialize(heap,
@@ -594,11 +643,10 @@ LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
 void MemoryAllocator::Free(MemoryChunk* chunk) {
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
-  if (chunk->owner() != NULL) {
-    ObjectSpace space =
-        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
-    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
-  }
+  size_t chunk_size;
+  ObjectSpace space =
+      static_cast<ObjectSpace>(1 << chunk->owner()->identity());
+  PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
   isolate_->heap()->RememberUnmappedPage(
       reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
@@ -608,12 +656,52 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
   VirtualMemory* reservation = chunk->reserved_memory();
   if (reservation->IsReserved()) {
+    chunk_size = reservation->size();
     FreeMemory(reservation, chunk->executable());
   } else {
+    chunk_size = chunk->size();
     FreeMemory(chunk->address(),
                chunk->size(),
                chunk->executable());
   }
+
+  switch (chunk->owner()->identity()) {
+    case OLD_POINTER_SPACE:
+      isolate_->counters()->old_pointer_space_bytes_allocated()->
+          Decrement(static_cast<int>(chunk_size));
+      break;
+
+    case OLD_DATA_SPACE:
+      isolate_->counters()->old_data_space_bytes_allocated()->
+          Decrement(static_cast<int>(chunk_size));
+      break;
+
+    case CODE_SPACE:
+      isolate_->counters()->code_space_bytes_allocated()->
+          Decrement(static_cast<int>(chunk_size));
+      break;
+
+    case MAP_SPACE:
+      isolate_->counters()->map_space_bytes_allocated()->
+          Decrement(static_cast<int>(chunk_size));
+      break;
+
+    case CELL_SPACE:
+      isolate_->counters()->cell_space_bytes_allocated()->
+          Decrement(static_cast<int>(chunk_size));
+      break;
+
+    case LO_SPACE:
+      isolate_->counters()->lo_space_bytes_allocated()->
+          Decrement(static_cast<int>(chunk_size));
+      break;
+
+    case NEW_SPACE:
+      // NEW_SPACE is released using UncommitBlock.
+
+    default:
+      UNREACHABLE();
+  }
 }
@@ -625,6 +713,8 @@ bool MemoryAllocator::CommitBlock(Address start,
   ZapBlock(start, size);
 #endif
   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
+  isolate_->counters()->new_space_bytes_allocated()->Increment(
+      static_cast<int>(size));
   return true;
 }
@@ -632,6 +722,8 @@ bool MemoryAllocator::CommitBlock(Address start,
 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
   if (!VirtualMemory::UncommitRegion(start, size)) return false;
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+  isolate_->counters()->new_space_bytes_allocated()->Decrement(
+      static_cast<int>(size));
   return true;
 }
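
For reference, below is a minimal, self-contained sketch of the counter pattern the patch adds: each space tracks a bytes-allocated counter that is incremented/decremented by the chunk size, and a bytes-used counter that is set to the owner's live object size. The SpaceCounters and StatsCounter types and the two helper functions are illustrative stand-ins for this sketch only; they are not the V8 classes touched by the diff.

// Sketch only -- SpaceCounters, StatsCounter, RecordChunkAllocated and
// RecordChunkFreed are hypothetical stand-ins, not V8 API.
#include <cstddef>
#include <cstdint>

struct StatsCounter {
  int64_t value = 0;
  void Increment(int by) { value += by; }
  void Decrement(int by) { value -= by; }
  void Set(int to) { value = to; }
};

struct SpaceCounters {
  StatsCounter bytes_allocated;  // total bytes handed to the space
  StatsCounter bytes_used;       // live bytes, as reported by the owner
};

// Mirrors the AllocateChunk() side of the patch: bump the allocated total
// and record the owner's current live size.
void RecordChunkAllocated(SpaceCounters* c, size_t chunk_size,
                          size_t live_bytes) {
  c->bytes_allocated.Increment(static_cast<int>(chunk_size));
  c->bytes_used.Set(static_cast<int>(live_bytes));
}

// Mirrors the Free() side of the patch: return the chunk's bytes.
void RecordChunkFreed(SpaceCounters* c, size_t chunk_size) {
  c->bytes_allocated.Decrement(static_cast<int>(chunk_size));
}

New space does not go through this path at all: as the diff shows, its bytes are counted in CommitBlock/UncommitBlock instead, which is why the NEW_SPACE cases in both switch statements are treated as unreachable.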