Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 11566011: Use MemoryChunk-based allocation for deoptimization entry code (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 7 years, 10 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 188 matching lines...)
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
       return;  // Found a large enough allocation block.
     }
   }

   // Code range is full or too fragmented.
   V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
 }


-Address CodeRange::AllocateRawMemory(const size_t requested,
+Address CodeRange::AllocateRawMemory(const size_t requested_size,
+                                     const size_t commit_size,
                                      size_t* allocated) {
+  ASSERT(commit_size <= requested_size);
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
-  if (requested > allocation_list_[current_allocation_block_index_].size) {
+  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.  This function call may
     // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
-    GetNextAllocationBlock(requested);
+    GetNextAllocationBlock(requested_size);
   }
   // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
+  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
   if (aligned_requested >= (current.size - Page::kPageSize)) {
     // Don't leave a small free block, useless for a large object or chunk.
     *allocated = current.size;
   } else {
     *allocated = aligned_requested;
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!MemoryAllocator::CommitCodePage(code_range_,
-                                       current.start,
-                                       *allocated)) {
+  if (!MemoryAllocator::CommitExecutableMemory(code_range_,
+                                               current.start,
+                                               commit_size,
+                                               *allocated)) {
     *allocated = 0;
     return NULL;
   }
   allocation_list_[current_allocation_block_index_].start += *allocated;
   allocation_list_[current_allocation_block_index_].size -= *allocated;
   if (*allocated == current.size) {
     GetNextAllocationBlock(0);  // This block is used up, get the next one.
   }
   return current.start;
 }


+bool CodeRange::CommitRawMemory(Address start, size_t length) {
+  return code_range_->Commit(start, length, true);
+}
+
+
+bool CodeRange::UncommitRawMemory(Address start, size_t length) {
+  return code_range_->Uncommit(start, length);
+}
+
+
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
   code_range_->Uncommit(address, length);
 }


 void CodeRange::TearDown() {
   delete code_range_;  // Frees all memory in the virtual memory range.
   code_range_ = NULL;
(...skipping 91 matching lines...)
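The CodeRange changes above split reservation from commitment: AllocateRawMemory now takes a requested_size (how much of the code range's address space to carve out) and a smaller commit_size (how much to back with committed pages up front), and the new CommitRawMemory/UncommitRawMemory let callers grow or shrink the committed part later. The following standalone sketch models that pattern on Linux, with mmap/mprotect standing in for V8's VirtualMemory; the helper names and sizes are invented for illustration and are not part of the patch.

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>

// Reserve address space without committing it: PROT_NONE pages are unusable
// until a later mprotect "commits" them.
static void* ReserveRegion(size_t size) {
  void* base = mmap(NULL, size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  return base == MAP_FAILED ? NULL : base;
}

static bool CommitRegion(void* start, size_t length) {
  return mprotect(start, length, PROT_READ | PROT_WRITE) == 0;
}

static bool UncommitRegion(void* start, size_t length) {
  return mprotect(start, length, PROT_NONE) == 0;
}

int main() {
  const size_t kPage = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t kReserved = 16 * kPage;   // reserve a large range up front
  const size_t kCommitted = 2 * kPage;   // commit only a small prefix
  char* base = static_cast<char*>(ReserveRegion(kReserved));
  if (base == NULL || !CommitRegion(base, kCommitted)) return 1;
  base[0] = 1;  // the committed prefix is usable

  // Grow the committed part later, as CodeRange::CommitRawMemory would.
  if (!CommitRegion(base + kCommitted, kPage)) return 1;
  base[kCommitted] = 2;

  // Shrink it again, as CodeRange::UncommitRawMemory would.
  if (!UncommitRegion(base + kCommitted, kPage)) return 1;

  munmap(base, kReserved);
  printf("reserved %zu bytes, committed and uncommitted on demand\n", kReserved);
  return 0;
}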

   if (!reservation.IsReserved()) return NULL;
   size_ += reservation.size();
   Address base = RoundUp(static_cast<Address>(reservation.address()),
                          alignment);
   controller->TakeControl(&reservation);
   return base;
 }


-Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
+                                               size_t commit_size,
                                                size_t alignment,
                                                Executability executable,
                                                VirtualMemory* controller) {
+  ASSERT(commit_size <= reserve_size);
   VirtualMemory reservation;
-  Address base = ReserveAlignedMemory(size, alignment, &reservation);
+  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
   if (base == NULL) return NULL;

   if (executable == EXECUTABLE) {
-    if (!CommitCodePage(&reservation, base, size)) {
+    if (!CommitExecutableMemory(&reservation,
+                                base,
+                                commit_size,
+                                reserve_size)) {
       base = NULL;
     }
   } else {
-    if (!reservation.Commit(base, size, false)) {
+    if (!reservation.Commit(base, commit_size, false)) {
       base = NULL;
     }
   }

   if (base == NULL) {
     // Failed to commit the body. Release the mapping and any partially
     // commited regions inside it.
     reservation.Release();
     return NULL;
   }
(...skipping 83 matching lines...)
   }

   if (owner == heap->old_data_space()) {
     chunk->SetFlag(CONTAINS_ONLY_DATA);
   }

   return chunk;
 }


+// Commit MemoryChunk area to the requested size.
+bool MemoryChunk::CommitArea(size_t requested) {
+  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
+                      MemoryAllocator::CodePageGuardSize() : 0;
+  size_t header_size = area_start() - address() - guard_size;
+  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
+  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
+                                  OS::CommitPageSize());
+
+  if (commit_size > committed_size) {
+    // Commit size should be less or equal than the reserved size.
+    ASSERT(commit_size <= size() - 2 * guard_size);
+    // Append the committed area.
+    Address start = address() + committed_size + guard_size;
+    size_t length = commit_size - committed_size;
+    if (reservation_.IsReserved()) {
+      if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
+        return false;
+      }
+    } else {
+      CodeRange* code_range = heap_->isolate()->code_range();
+      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+      if (!code_range->CommitRawMemory(start, length)) return false;
+    }
+
+    if (Heap::ShouldZapGarbage()) {
+      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
+    }
+  } else if (commit_size < committed_size) {
+    ASSERT(commit_size > 0);
+    // Shrink the committed area.
+    size_t length = committed_size - commit_size;
+    Address start = address() + committed_size + guard_size - length;
+    if (reservation_.IsReserved()) {
+      if (!reservation_.Uncommit(start, length)) return false;
+    } else {
+      CodeRange* code_range = heap_->isolate()->code_range();
+      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+      if (!code_range->UncommitRawMemory(start, length)) return false;
+    }
+  }
+
+  area_end_ = area_start_ + requested;
+  return true;
+}
+
+
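To make CommitArea's rounding concrete, here is a minimal standalone sketch of just the commit_size/committed_size arithmetic, with assumed numbers (a 4 KB commit page and a one-page header); the real values come from OS::CommitPageSize() and the chunk header, and the actual commit/uncommit calls are omitted.

#include <cstddef>
#include <cstdio>

// Round value up to a power-of-two alignment, as V8's RoundUp does.
static size_t RoundUp(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t kPageSize = 4096;         // assumed OS::CommitPageSize()
  const size_t header_size = kPageSize;  // assumed chunk header size
  size_t current_area = 3000;            // area_end() - area_start()
  size_t requested = 9000;               // new area size asked for

  size_t committed_size = RoundUp(header_size + current_area, kPageSize);
  size_t commit_size = RoundUp(header_size + requested, kPageSize);

  if (commit_size > committed_size) {
    printf("grow committed region by %zu bytes\n", commit_size - committed_size);
  } else if (commit_size < committed_size) {
    printf("shrink committed region by %zu bytes\n", committed_size - commit_size);
  } else {
    printf("already committed enough\n");
  }
  return 0;
}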
 void MemoryChunk::InsertAfter(MemoryChunk* other) {
   next_chunk_ = other->next_chunk_;
   prev_chunk_ = other;
   other->next_chunk_->prev_chunk_ = this;
   other->next_chunk_ = this;
 }


 void MemoryChunk::Unlink() {
   if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
     heap_->decrement_scan_on_scavenge_pages();
     ClearFlag(SCAN_ON_SCAVENGE);
   }
   next_chunk_->prev_chunk_ = prev_chunk_;
   prev_chunk_->next_chunk_ = next_chunk_;
   prev_chunk_ = NULL;
   next_chunk_ = NULL;
 }


-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
+                                            intptr_t commit_area_size,
                                             Executability executable,
                                             Space* owner) {
+  ASSERT(commit_area_size <= reserve_area_size);
+
   size_t chunk_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
   VirtualMemory reservation;
   Address area_start = NULL;
   Address area_end = NULL;

+  //
+  // MemoryChunk layout:
+  //
+  //             Executable
+  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+  // |           Header           |
+  // +----------------------------+<- base + CodePageGuardStartOffset
+  // |           Guard            |
+  // +----------------------------+<- area_start_
+  // |            Area            |
+  // +----------------------------+<- area_end_ (area_start + commit_area_size)
+  // |   Committed but not used   |
+  // +----------------------------+<- aligned at OS page boundary
+  // | Reserved but not committed |
+  // +----------------------------+<- aligned at OS page boundary
+  // |           Guard            |
+  // +----------------------------+<- base + chunk_size
+  //
+  //           Non-executable
+  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+  // |           Header           |
+  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
+  // |            Area            |
+  // +----------------------------+<- area_end_ (area_start + commit_area_size)
+  // |   Committed but not used   |
+  // +----------------------------+<- aligned at OS page boundary
+  // | Reserved but not committed |
+  // +----------------------------+<- base + chunk_size
+  //

   if (executable == EXECUTABLE) {
-    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                          OS::CommitPageSize()) + CodePageGuardSize();

     // Check executable memory limit.
     if (size_executable_ + chunk_size > capacity_executable_) {
       LOG(isolate_,
           StringEvent("MemoryAllocator::AllocateRawMemory",
                       "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }

+    // Size of header (not executable) plus area (executable).
+    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
+                                 OS::CommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
+                                                       commit_size,
+                                                       &chunk_size);
       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                        MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
       size_ += chunk_size;
       // Update executable memory size.
       size_executable_ += chunk_size;
     } else {
       base = AllocateAlignedMemory(chunk_size,
+                                   commit_size,
                                    MemoryChunk::kAlignment,
                                    executable,
                                    &reservation);
       if (base == NULL) return NULL;
       // Update executable memory size.
       size_executable_ += reservation.size();
     }

     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, CodePageGuardStartOffset());
-      ZapBlock(base + CodePageAreaStartOffset(), body_size);
+      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
     }

     area_start = base + CodePageAreaStartOffset();
-    area_end = area_start + body_size;
+    area_end = area_start + commit_area_size;
   } else {
-    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
+                         OS::CommitPageSize());
+    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
+                                 commit_area_size, OS::CommitPageSize());
     base = AllocateAlignedMemory(chunk_size,
+                                 commit_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);

     if (base == NULL) return NULL;

     if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, chunk_size);
+      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
     }

     area_start = base + Page::kObjectStartOffset;
-    area_end = base + chunk_size;
+    area_end = area_start + commit_area_size;
   }

+  // Use chunk_size for statistics and callbacks because we assume that they
+  // treat reserved but not-yet committed memory regions of chunks as allocated.
   isolate_->counters()->memory_allocated()->
       Increment(static_cast<int>(chunk_size));

   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
   if (owner != NULL) {
     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   }

   MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                 base,
                                                 chunk_size,
                                                 area_start,
                                                 area_end,
                                                 executable,
                                                 owner);
   result->set_reserved_memory(&reservation);
   return result;
 }

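A worked example of the two sizes AllocateChunk computes for an executable chunk, under assumed values (4 KB OS commit pages, a one-page header so CodePageGuardStartOffset() = 4 KB, a one-page guard so CodePageAreaStartOffset() = 8 KB): with reserve_area_size of 64 KB and commit_area_size of 16 KB, chunk_size = RoundUp(8 KB + 64 KB, 4 KB) + 4 KB = 76 KB of reserved address space, while commit_size = RoundUp(4 KB + 16 KB, 4 KB) = 20 KB is committed up front (the non-executable header page plus 16 KB of executable area). The remainder stays in the "Reserved but not committed" band of the layout diagram until MemoryChunk::CommitArea grows the area.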

 Page* MemoryAllocator::AllocatePage(intptr_t size,
                                     PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(size, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);

   if (chunk == NULL) return NULL;

   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
 }


 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Space* owner,
                                               Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(object_size,
+                                     object_size,
+                                     executable,
+                                     owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
 }


 void MemoryAllocator::Free(MemoryChunk* chunk) {
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
   if (chunk->owner() != NULL) {
     ObjectSpace space =
         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
(...skipping 121 matching lines...)
 }


 int MemoryAllocator::CodePageAreaEndOffset() {
   // We are guarding code pages: the last OS page will be protected as
   // non-writable.
   return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
 }


-bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
-                                     Address start,
-                                     size_t size) {
+bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
+                                             Address start,
+                                             size_t commit_size,
+                                             size_t reserved_size) {
   // Commit page header (not executable).
   if (!vm->Commit(start,
                   CodePageGuardStartOffset(),
                   false)) {
     return false;
   }

   // Create guard page after the header.
   if (!vm->Guard(start + CodePageGuardStartOffset())) {
     return false;
   }

   // Commit page body (executable).
-  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
   if (!vm->Commit(start + CodePageAreaStartOffset(),
-                  area_size,
+                  commit_size - CodePageGuardStartOffset(),
                   true)) {
     return false;
   }

-  // Create guard page after the allocatable area.
-  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
+  // Create guard page before the end.
+  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
     return false;
   }

   return true;
 }


 // -----------------------------------------------------------------------------
 // MemoryChunk implementation

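CommitExecutableMemory above commits the header read/write, leaves a guard page after it, commits only commit_size worth of body as executable, and now places the trailing guard at the end of the whole reservation (reserved_size), so pages between the committed body and that final guard stay reserved but uncommitted. A rough standalone model of this protection layout, using Linux mmap/mprotect with made-up page counts rather than V8's VirtualMemory and offsets:

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kPage = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t kReserved = 8 * kPage;   // stands in for reserved_size
  const size_t kBody = 3 * kPage;       // executable body committed up front

  void* mem = mmap(NULL, kReserved, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return 1;
  char* base = static_cast<char*>(mem);

  // Header page: committed read/write, not executable.
  if (mprotect(base, kPage, PROT_READ | PROT_WRITE) != 0) return 1;
  // Guard page after the header: left PROT_NONE, any access faults.
  // Body after the guard: committed read/write/execute.
  if (mprotect(base + 2 * kPage, kBody,
               PROT_READ | PROT_WRITE | PROT_EXEC) != 0) return 1;
  // Pages between the body and the last page stay reserved, uncommitted.
  // Trailing guard page at base + kReserved - kPage: left PROT_NONE.

  printf("header / guard / executable body / trailing guard laid out\n");
  munmap(base, kReserved);
  return 0;
}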
(...skipping 2215 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG

 } }  // namespace v8::internal
