Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 9535013: Merge r10809 from the bleeding_edge to the 3.8 branch. (Closed)
Base URL: http://v8.googlecode.com/svn/branches/3.8/
Patch Set: Created 8 years, 9 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 57 matching lines...)
68 68
69 HeapObjectIterator::HeapObjectIterator(Page* page, 69 HeapObjectIterator::HeapObjectIterator(Page* page,
70 HeapObjectCallback size_func) { 70 HeapObjectCallback size_func) {
71 Space* owner = page->owner(); 71 Space* owner = page->owner();
72 ASSERT(owner == HEAP->old_pointer_space() || 72 ASSERT(owner == HEAP->old_pointer_space() ||
73 owner == HEAP->old_data_space() || 73 owner == HEAP->old_data_space() ||
74 owner == HEAP->map_space() || 74 owner == HEAP->map_space() ||
75 owner == HEAP->cell_space() || 75 owner == HEAP->cell_space() ||
76 owner == HEAP->code_space()); 76 owner == HEAP->code_space());
77 Initialize(reinterpret_cast<PagedSpace*>(owner), 77 Initialize(reinterpret_cast<PagedSpace*>(owner),
78 page->ObjectAreaStart(), 78 page->area_start(),
79 page->ObjectAreaEnd(), 79 page->area_end(),
80 kOnePageOnly, 80 kOnePageOnly,
81 size_func); 81 size_func);
82 ASSERT(page->WasSweptPrecisely()); 82 ASSERT(page->WasSweptPrecisely());
83 } 83 }
84 84
85 85
86 void HeapObjectIterator::Initialize(PagedSpace* space, 86 void HeapObjectIterator::Initialize(PagedSpace* space,
87 Address cur, Address end, 87 Address cur, Address end,
88 HeapObjectIterator::PageMode mode, 88 HeapObjectIterator::PageMode mode,
89 HeapObjectCallback size_f) { 89 HeapObjectCallback size_f) {
(...skipping 11 matching lines...)
101 // We have hit the end of the page and should advance to the next block of 101 // We have hit the end of the page and should advance to the next block of
102 // objects. This happens at the end of the page. 102 // objects. This happens at the end of the page.
103 bool HeapObjectIterator::AdvanceToNextPage() { 103 bool HeapObjectIterator::AdvanceToNextPage() {
104 ASSERT(cur_addr_ == cur_end_); 104 ASSERT(cur_addr_ == cur_end_);
105 if (page_mode_ == kOnePageOnly) return false; 105 if (page_mode_ == kOnePageOnly) return false;
106 Page* cur_page; 106 Page* cur_page;
107 if (cur_addr_ == NULL) { 107 if (cur_addr_ == NULL) {
108 cur_page = space_->anchor(); 108 cur_page = space_->anchor();
109 } else { 109 } else {
110 cur_page = Page::FromAddress(cur_addr_ - 1); 110 cur_page = Page::FromAddress(cur_addr_ - 1);
111 ASSERT(cur_addr_ == cur_page->ObjectAreaEnd()); 111 ASSERT(cur_addr_ == cur_page->area_end());
112 } 112 }
113 cur_page = cur_page->next_page(); 113 cur_page = cur_page->next_page();
114 if (cur_page == space_->anchor()) return false; 114 if (cur_page == space_->anchor()) return false;
115 cur_addr_ = cur_page->ObjectAreaStart(); 115 cur_addr_ = cur_page->area_start();
116 cur_end_ = cur_page->ObjectAreaEnd(); 116 cur_end_ = cur_page->area_end();
117 ASSERT(cur_page->WasSweptPrecisely()); 117 ASSERT(cur_page->WasSweptPrecisely());
118 return true; 118 return true;
119 } 119 }
120 120
121 121
122 // ----------------------------------------------------------------------------- 122 // -----------------------------------------------------------------------------
123 // CodeRange 123 // CodeRange
124 124
125 125
126 CodeRange::CodeRange(Isolate* isolate) 126 CodeRange::CodeRange(Isolate* isolate)
(...skipping 93 matching lines...)
220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); 220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
221 FreeBlock current = allocation_list_[current_allocation_block_index_]; 221 FreeBlock current = allocation_list_[current_allocation_block_index_];
222 if (aligned_requested >= (current.size - Page::kPageSize)) { 222 if (aligned_requested >= (current.size - Page::kPageSize)) {
223 // Don't leave a small free block, useless for a large object or chunk. 223 // Don't leave a small free block, useless for a large object or chunk.
224 *allocated = current.size; 224 *allocated = current.size;
225 } else { 225 } else {
226 *allocated = aligned_requested; 226 *allocated = aligned_requested;
227 } 227 }
228 ASSERT(*allocated <= current.size); 228 ASSERT(*allocated <= current.size);
229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); 229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
230 if (!code_range_->Commit(current.start, *allocated, true)) { 230 if (!MemoryAllocator::CommitCodePage(code_range_,
231 current.start,
232 *allocated)) {
231 *allocated = 0; 233 *allocated = 0;
232 return NULL; 234 return NULL;
233 } 235 }
234 allocation_list_[current_allocation_block_index_].start += *allocated; 236 allocation_list_[current_allocation_block_index_].start += *allocated;
235 allocation_list_[current_allocation_block_index_].size -= *allocated; 237 allocation_list_[current_allocation_block_index_].size -= *allocated;
236 if (*allocated == current.size) { 238 if (*allocated == current.size) {
237 GetNextAllocationBlock(0); // This block is used up, get the next one. 239 GetNextAllocationBlock(0); // This block is used up, get the next one.
238 } 240 }
239 return current.start; 241 return current.start;
240 } 242 }
(...skipping 110 matching lines...)
351 } 353 }
352 354
353 355
354 Address MemoryAllocator::AllocateAlignedMemory(size_t size, 356 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
355 size_t alignment, 357 size_t alignment,
356 Executability executable, 358 Executability executable,
357 VirtualMemory* controller) { 359 VirtualMemory* controller) {
358 VirtualMemory reservation; 360 VirtualMemory reservation;
359 Address base = ReserveAlignedMemory(size, alignment, &reservation); 361 Address base = ReserveAlignedMemory(size, alignment, &reservation);
360 if (base == NULL) return NULL; 362 if (base == NULL) return NULL;
361 if (!reservation.Commit(base, 363
362 size, 364 if (executable == EXECUTABLE) {
363 executable == EXECUTABLE)) { 365 CommitCodePage(&reservation, base, size);
364 return NULL; 366 } else {
367 if (!reservation.Commit(base,
368 size,
369 executable == EXECUTABLE)) {
370 return NULL;
371 }
365 } 372 }
373
366 controller->TakeControl(&reservation); 374 controller->TakeControl(&reservation);
367 return base; 375 return base;
368 } 376 }
369 377
370 378
371 void Page::InitializeAsAnchor(PagedSpace* owner) { 379 void Page::InitializeAsAnchor(PagedSpace* owner) {
372 set_owner(owner); 380 set_owner(owner);
373 set_prev_page(this); 381 set_prev_page(this);
374 set_next_page(this); 382 set_next_page(this);
375 } 383 }
376 384
377 385
378 NewSpacePage* NewSpacePage::Initialize(Heap* heap, 386 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
379 Address start, 387 Address start,
380 SemiSpace* semi_space) { 388 SemiSpace* semi_space) {
389 Address area_start = start + NewSpacePage::kObjectStartOffset;
390 Address area_end = start + Page::kPageSize;
391
381 MemoryChunk* chunk = MemoryChunk::Initialize(heap, 392 MemoryChunk* chunk = MemoryChunk::Initialize(heap,
382 start, 393 start,
383 Page::kPageSize, 394 Page::kPageSize,
395 area_start,
396 area_end,
384 NOT_EXECUTABLE, 397 NOT_EXECUTABLE,
385 semi_space); 398 semi_space);
386 chunk->set_next_chunk(NULL); 399 chunk->set_next_chunk(NULL);
387 chunk->set_prev_chunk(NULL); 400 chunk->set_prev_chunk(NULL);
388 chunk->initialize_scan_on_scavenge(true); 401 chunk->initialize_scan_on_scavenge(true);
389 bool in_to_space = (semi_space->id() != kFromSpace); 402 bool in_to_space = (semi_space->id() != kFromSpace);
390 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE 403 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
391 : MemoryChunk::IN_FROM_SPACE); 404 : MemoryChunk::IN_FROM_SPACE);
392 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE 405 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
393 : MemoryChunk::IN_TO_SPACE)); 406 : MemoryChunk::IN_TO_SPACE));
394 NewSpacePage* page = static_cast<NewSpacePage*>(chunk); 407 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
395 heap->incremental_marking()->SetNewSpacePageFlags(page); 408 heap->incremental_marking()->SetNewSpacePageFlags(page);
396 return page; 409 return page;
397 } 410 }
398 411
399 412
400 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { 413 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
401 set_owner(semi_space); 414 set_owner(semi_space);
402 set_next_chunk(this); 415 set_next_chunk(this);
403 set_prev_chunk(this); 416 set_prev_chunk(this);
404 // Flags marks this invalid page as not being in new-space. 417 // Flags marks this invalid page as not being in new-space.
405 // All real new-space pages will be in new-space. 418 // All real new-space pages will be in new-space.
406 SetFlags(0, ~0); 419 SetFlags(0, ~0);
407 } 420 }
408 421
409 422
410 MemoryChunk* MemoryChunk::Initialize(Heap* heap, 423 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
411 Address base, 424 Address base,
412 size_t size, 425 size_t size,
426 Address area_start,
427 Address area_end,
413 Executability executable, 428 Executability executable,
414 Space* owner) { 429 Space* owner) {
415 MemoryChunk* chunk = FromAddress(base); 430 MemoryChunk* chunk = FromAddress(base);
416 431
417 ASSERT(base == chunk->address()); 432 ASSERT(base == chunk->address());
418 433
419 chunk->heap_ = heap; 434 chunk->heap_ = heap;
420 chunk->size_ = size; 435 chunk->size_ = size;
436 chunk->area_start_ = area_start;
437 chunk->area_end_ = area_end;
421 chunk->flags_ = 0; 438 chunk->flags_ = 0;
422 chunk->set_owner(owner); 439 chunk->set_owner(owner);
423 chunk->InitializeReservedMemory(); 440 chunk->InitializeReservedMemory();
424 chunk->slots_buffer_ = NULL; 441 chunk->slots_buffer_ = NULL;
425 chunk->skip_list_ = NULL; 442 chunk->skip_list_ = NULL;
426 chunk->ResetLiveBytes(); 443 chunk->ResetLiveBytes();
427 Bitmap::Clear(chunk); 444 Bitmap::Clear(chunk);
428 chunk->initialize_scan_on_scavenge(false); 445 chunk->initialize_scan_on_scavenge(false);
429 chunk->SetFlag(WAS_SWEPT_PRECISELY); 446 chunk->SetFlag(WAS_SWEPT_PRECISELY);
430 447
431 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); 448 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
432 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); 449 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
433 450
434 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); 451 if (executable == EXECUTABLE) {
452 chunk->SetFlag(IS_EXECUTABLE);
453 }
435 454
436 if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA); 455 if (owner == heap->old_data_space()) {
456 chunk->SetFlag(CONTAINS_ONLY_DATA);
457 }
437 458
438 return chunk; 459 return chunk;
439 } 460 }
440 461
441 462
442 void MemoryChunk::InsertAfter(MemoryChunk* other) { 463 void MemoryChunk::InsertAfter(MemoryChunk* other) {
443 next_chunk_ = other->next_chunk_; 464 next_chunk_ = other->next_chunk_;
444 prev_chunk_ = other; 465 prev_chunk_ = other;
445 other->next_chunk_->prev_chunk_ = this; 466 other->next_chunk_->prev_chunk_ = this;
446 other->next_chunk_ = this; 467 other->next_chunk_ = this;
447 } 468 }
448 469
449 470
450 void MemoryChunk::Unlink() { 471 void MemoryChunk::Unlink() {
451 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { 472 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
452 heap_->decrement_scan_on_scavenge_pages(); 473 heap_->decrement_scan_on_scavenge_pages();
453 ClearFlag(SCAN_ON_SCAVENGE); 474 ClearFlag(SCAN_ON_SCAVENGE);
454 } 475 }
455 next_chunk_->prev_chunk_ = prev_chunk_; 476 next_chunk_->prev_chunk_ = prev_chunk_;
456 prev_chunk_->next_chunk_ = next_chunk_; 477 prev_chunk_->next_chunk_ = next_chunk_;
457 prev_chunk_ = NULL; 478 prev_chunk_ = NULL;
458 next_chunk_ = NULL; 479 next_chunk_ = NULL;
459 } 480 }
460 481
461 482
462 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, 483 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
463 Executability executable, 484 Executability executable,
464 Space* owner) { 485 Space* owner) {
465 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; 486 size_t chunk_size;
466 Heap* heap = isolate_->heap(); 487 Heap* heap = isolate_->heap();
467 Address base = NULL; 488 Address base = NULL;
468 VirtualMemory reservation; 489 VirtualMemory reservation;
490 Address area_start = NULL;
491 Address area_end = NULL;
469 if (executable == EXECUTABLE) { 492 if (executable == EXECUTABLE) {
493 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
494 OS::CommitPageSize()) + CodePageGuardSize();
495
470 // Check executable memory limit. 496 // Check executable memory limit.
471 if (size_executable_ + chunk_size > capacity_executable_) { 497 if (size_executable_ + chunk_size > capacity_executable_) {
472 LOG(isolate_, 498 LOG(isolate_,
473 StringEvent("MemoryAllocator::AllocateRawMemory", 499 StringEvent("MemoryAllocator::AllocateRawMemory",
474 "V8 Executable Allocation capacity exceeded")); 500 "V8 Executable Allocation capacity exceeded"));
475 return NULL; 501 return NULL;
476 } 502 }
477 503
478 // Allocate executable memory either from code range or from the 504 // Allocate executable memory either from code range or from the
479 // OS. 505 // OS.
480 if (isolate_->code_range()->exists()) { 506 if (isolate_->code_range()->exists()) {
481 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); 507 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
482 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), 508 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
483 MemoryChunk::kAlignment)); 509 MemoryChunk::kAlignment));
484 if (base == NULL) return NULL; 510 if (base == NULL) return NULL;
485 size_ += chunk_size; 511 size_ += chunk_size;
486 // Update executable memory size. 512 // Update executable memory size.
487 size_executable_ += chunk_size; 513 size_executable_ += chunk_size;
488 } else { 514 } else {
489 base = AllocateAlignedMemory(chunk_size, 515 base = AllocateAlignedMemory(chunk_size,
490 MemoryChunk::kAlignment, 516 MemoryChunk::kAlignment,
491 executable, 517 executable,
492 &reservation); 518 &reservation);
493 if (base == NULL) return NULL; 519 if (base == NULL) return NULL;
494 // Update executable memory size. 520 // Update executable memory size.
495 size_executable_ += reservation.size(); 521 size_executable_ += reservation.size();
496 } 522 }
523
524 #ifdef DEBUG
525 ZapBlock(base, CodePageGuardStartOffset());
526 ZapBlock(base + CodePageAreaStartOffset(), body_size);
527 #endif
528 area_start = base + CodePageAreaStartOffset();
529 area_end = area_start + body_size;
497 } else { 530 } else {
531 chunk_size = MemoryChunk::kObjectStartOffset + body_size;
498 base = AllocateAlignedMemory(chunk_size, 532 base = AllocateAlignedMemory(chunk_size,
499 MemoryChunk::kAlignment, 533 MemoryChunk::kAlignment,
500 executable, 534 executable,
501 &reservation); 535 &reservation);
502 536
503 if (base == NULL) return NULL; 537 if (base == NULL) return NULL;
538
539 #ifdef DEBUG
540 ZapBlock(base, chunk_size);
541 #endif
542
543 area_start = base + Page::kObjectStartOffset;
544 area_end = base + chunk_size;
504 } 545 }
505 546
506 #ifdef DEBUG
507 ZapBlock(base, chunk_size);
508 #endif
509 isolate_->counters()->memory_allocated()-> 547 isolate_->counters()->memory_allocated()->
510 Increment(static_cast<int>(chunk_size)); 548 Increment(static_cast<int>(chunk_size));
511 549
512 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); 550 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
513 if (owner != NULL) { 551 if (owner != NULL) {
514 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); 552 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
515 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); 553 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
516 } 554 }
517 555
518 MemoryChunk* result = MemoryChunk::Initialize(heap, 556 MemoryChunk* result = MemoryChunk::Initialize(heap,
519 base, 557 base,
520 chunk_size, 558 chunk_size,
559 area_start,
560 area_end,
521 executable, 561 executable,
522 owner); 562 owner);
523 result->set_reserved_memory(&reservation); 563 result->set_reserved_memory(&reservation);
524 return result; 564 return result;
525 } 565 }
526 566
527 567
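Note on the chunk-sizing change above: executable chunks now reserve room for a committed header, a leading guard page, the rounded-up code body, and a trailing guard page, while non-executable chunks keep the old kObjectStartOffset + body_size layout. A minimal standalone sketch of that arithmetic follows; the constants are hypothetical stand-ins for OS::CommitPageSize(), Page::kPageSize and MemoryChunk::kObjectStartOffset (the real values are platform- and build-dependent).

// Sketch only: mirrors the sizing logic in AllocateChunk above, with
// assumed constants in place of the real platform values.
#include <cstddef>
#include <cstdio>

const size_t kCommitPageSize    = 4 * 1024;   // assumed OS commit page size
const size_t kObjectStartOffset = 2 * 1024;   // assumed chunk header size

size_t RoundUp(size_t x, size_t multiple) {   // multiple is a power of two
  return (x + multiple - 1) & ~(multiple - 1);
}

size_t CodePageGuardStartOffset() {
  // The first whole OS page after the chunk header is the leading guard page.
  return RoundUp(kObjectStartOffset, kCommitPageSize);
}
size_t CodePageGuardSize() { return kCommitPageSize; }
size_t CodePageAreaStartOffset() {
  return CodePageGuardStartOffset() + CodePageGuardSize();
}

size_t ExecutableChunkSize(size_t body_size) {
  // Header + leading guard + rounded-up code area, then a trailing guard page.
  return RoundUp(CodePageAreaStartOffset() + body_size, kCommitPageSize) +
         CodePageGuardSize();
}
size_t RegularChunkSize(size_t body_size) {
  // Non-executable chunks: header immediately followed by the object area.
  return kObjectStartOffset + body_size;
}

int main() {
  std::printf("executable chunk for a 64 KB body: %zu bytes\n",
              ExecutableChunkSize(64 * 1024));
  std::printf("regular chunk for a 64 KB body:    %zu bytes\n",
              RegularChunkSize(64 * 1024));
  return 0;
}

With these stand-in numbers the executable chunk comes out roughly 10 KB larger than the plain one; that overhead (guard pages plus commit-page rounding) is the per-chunk cost of the guard-page scheme.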
528 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, 568 Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
529 Executability executable) { 569 Executability executable) {
530 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); 570 MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
571 executable,
572 owner);
531 573
532 if (chunk == NULL) return NULL; 574 if (chunk == NULL) return NULL;
533 575
534 return Page::Initialize(isolate_->heap(), chunk, executable, owner); 576 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
535 } 577 }
536 578
537 579
538 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, 580 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
539 Executability executable, 581 Executability executable,
540 Space* owner) { 582 Space* owner) {
(...skipping 100 matching lines...)
641 #ifdef DEBUG 683 #ifdef DEBUG
642 void MemoryAllocator::ReportStatistics() { 684 void MemoryAllocator::ReportStatistics() {
643 float pct = static_cast<float>(capacity_ - size_) / capacity_; 685 float pct = static_cast<float>(capacity_ - size_) / capacity_;
644 PrintF(" capacity: %" V8_PTR_PREFIX "d" 686 PrintF(" capacity: %" V8_PTR_PREFIX "d"
645 ", used: %" V8_PTR_PREFIX "d" 687 ", used: %" V8_PTR_PREFIX "d"
646 ", available: %%%d\n\n", 688 ", available: %%%d\n\n",
647 capacity_, size_, static_cast<int>(pct*100)); 689 capacity_, size_, static_cast<int>(pct*100));
648 } 690 }
649 #endif 691 #endif
650 692
693
694 int MemoryAllocator::CodePageGuardStartOffset() {
695 // We are guarding code pages: the first OS page after the header
696 // will be protected as non-writable.
697 return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
698 }
699
700
701 int MemoryAllocator::CodePageGuardSize() {
702 return OS::CommitPageSize();
703 }
704
705
706 int MemoryAllocator::CodePageAreaStartOffset() {
707 // We are guarding code pages: the first OS page after the header
708 // will be protected as non-writable.
709 return CodePageGuardStartOffset() + CodePageGuardSize();
710 }
711
712
713 int MemoryAllocator::CodePageAreaEndOffset() {
714 // We are guarding code pages: the last OS page will be protected as
715 // non-writable.
716 return Page::kPageSize - OS::CommitPageSize();
717 }
718
719
720 bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
721 Address start,
722 size_t size) {
723 // Commit page header (not executable).
724 if (!vm->Commit(start,
725 CodePageGuardStartOffset(),
726 false)) {
727 return false;
728 }
729
730 // Create guard page after the header.
731 if (!vm->Guard(start + CodePageGuardStartOffset())) {
732 return false;
733 }
734
735 // Commit page body (executable).
736 size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
737 if (!vm->Commit(start + CodePageAreaStartOffset(),
738 area_size,
739 true)) {
740 return false;
741 }
742
743 // Create guard page after the allocatable area.
744 if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
745 return false;
746 }
747
748 return true;
749 }
750
751
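As a quick cross-check of the layout CommitCodePage establishes, the tiny self-contained snippet below (same assumed 4 KB commit page and a 1 MB page as in the earlier sketch; these are illustrative values, not V8's actual constants) confirms that the executable area ends exactly where the trailing guard page begins.

#include <cassert>
#include <cstddef>

int main() {
  // Assumed, illustrative constants (not the real V8/platform values).
  const size_t kCommitPageSize = 4 * 1024;               // OS::CommitPageSize()
  const size_t kPageSize       = 1024 * 1024;            // Page::kPageSize
  const size_t kGuardStart     = 4 * 1024;               // CodePageGuardStartOffset()
  const size_t kGuardSize      = kCommitPageSize;        // CodePageGuardSize()
  const size_t kAreaStart      = kGuardStart + kGuardSize;    // CodePageAreaStartOffset()
  const size_t kAreaEnd        = kPageSize - kCommitPageSize; // CodePageAreaEndOffset()

  // Page layout: [header | leading guard | executable area | trailing guard].
  size_t area_size = kPageSize - kAreaStart - kGuardSize;
  assert(kAreaStart + area_size == kAreaEnd);  // trailing guard starts at area end
  return 0;
}

The guard pages themselves are left inaccessible (vm->Guard above), so a stray write that runs off either end of the code area faults immediately instead of silently corrupting the chunk header or a neighboring page.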
651 // ----------------------------------------------------------------------------- 752 // -----------------------------------------------------------------------------
652 // MemoryChunk implementation 753 // MemoryChunk implementation
653 754
654 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) { 755 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
655 MemoryChunk* chunk = MemoryChunk::FromAddress(address); 756 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
656 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { 757 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
657 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by); 758 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
658 } 759 }
659 chunk->IncrementLiveBytes(by); 760 chunk->IncrementLiveBytes(by);
660 } 761 }
661 762
662 // ----------------------------------------------------------------------------- 763 // -----------------------------------------------------------------------------
663 // PagedSpace implementation 764 // PagedSpace implementation
664 765
665 PagedSpace::PagedSpace(Heap* heap, 766 PagedSpace::PagedSpace(Heap* heap,
666 intptr_t max_capacity, 767 intptr_t max_capacity,
667 AllocationSpace id, 768 AllocationSpace id,
668 Executability executable) 769 Executability executable)
669 : Space(heap, id, executable), 770 : Space(heap, id, executable),
670 free_list_(this), 771 free_list_(this),
671 was_swept_conservatively_(false), 772 was_swept_conservatively_(false),
672 first_unswept_page_(Page::FromAddress(NULL)), 773 first_unswept_page_(Page::FromAddress(NULL)),
673 unswept_free_bytes_(0) { 774 unswept_free_bytes_(0) {
775 if (id == CODE_SPACE) {
776 area_size_ = heap->isolate()->memory_allocator()->
777 CodePageAreaSize();
778 } else {
779 area_size_ = Page::kPageSize - Page::kObjectStartOffset;
780 }
674 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) 781 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
675 * Page::kObjectAreaSize; 782 * AreaSize();
676 accounting_stats_.Clear(); 783 accounting_stats_.Clear();
677 784
678 allocation_info_.top = NULL; 785 allocation_info_.top = NULL;
679 allocation_info_.limit = NULL; 786 allocation_info_.limit = NULL;
680 787
681 anchor_.InitializeAsAnchor(this); 788 anchor_.InitializeAsAnchor(this);
682 } 789 }
683 790
684 791
685 bool PagedSpace::SetUp() { 792 bool PagedSpace::SetUp() {
(...skipping 29 matching lines...)
715 Address cur = obj->address(); 822 Address cur = obj->address();
716 Address next = cur + obj->Size(); 823 Address next = cur + obj->Size();
717 if ((cur <= addr) && (addr < next)) return obj; 824 if ((cur <= addr) && (addr < next)) return obj;
718 } 825 }
719 826
720 UNREACHABLE(); 827 UNREACHABLE();
721 return Failure::Exception(); 828 return Failure::Exception();
722 } 829 }
723 830
724 bool PagedSpace::CanExpand() { 831 bool PagedSpace::CanExpand() {
725 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); 832 ASSERT(max_capacity_ % AreaSize() == 0);
726 ASSERT(Capacity() % Page::kObjectAreaSize == 0); 833 ASSERT(Capacity() % AreaSize() == 0);
727 834
728 if (Capacity() == max_capacity_) return false; 835 if (Capacity() == max_capacity_) return false;
729 836
730 ASSERT(Capacity() < max_capacity_); 837 ASSERT(Capacity() < max_capacity_);
731 838
732 // Are we going to exceed capacity for this space? 839 // Are we going to exceed capacity for this space?
733 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; 840 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
734 841
735 return true; 842 return true;
736 } 843 }
(...skipping 19 matching lines...)
756 while (it.has_next()) { 863 while (it.has_next()) {
757 it.next(); 864 it.next();
758 count++; 865 count++;
759 } 866 }
760 return count; 867 return count;
761 } 868 }
762 869
763 870
764 void PagedSpace::ReleasePage(Page* page) { 871 void PagedSpace::ReleasePage(Page* page) {
765 ASSERT(page->LiveBytes() == 0); 872 ASSERT(page->LiveBytes() == 0);
873 ASSERT(AreaSize() == page->area_size());
766 874
767 // Adjust list of unswept pages if the page is the head of the list. 875 // Adjust list of unswept pages if the page is the head of the list.
768 if (first_unswept_page_ == page) { 876 if (first_unswept_page_ == page) {
769 first_unswept_page_ = page->next_page(); 877 first_unswept_page_ = page->next_page();
770 if (first_unswept_page_ == anchor()) { 878 if (first_unswept_page_ == anchor()) {
771 first_unswept_page_ = Page::FromAddress(NULL); 879 first_unswept_page_ = Page::FromAddress(NULL);
772 } 880 }
773 } 881 }
774 882
775 if (page->WasSwept()) { 883 if (page->WasSwept()) {
776 intptr_t size = free_list_.EvictFreeListItems(page); 884 intptr_t size = free_list_.EvictFreeListItems(page);
777 accounting_stats_.AllocateBytes(size); 885 accounting_stats_.AllocateBytes(size);
778 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size)); 886 ASSERT_EQ(AreaSize(), static_cast<int>(size));
779 } else { 887 } else {
780 DecreaseUnsweptFreeBytes(page); 888 DecreaseUnsweptFreeBytes(page);
781 } 889 }
782 890
783 if (Page::FromAllocationTop(allocation_info_.top) == page) { 891 if (Page::FromAllocationTop(allocation_info_.top) == page) {
784 allocation_info_.top = allocation_info_.limit = NULL; 892 allocation_info_.top = allocation_info_.limit = NULL;
785 } 893 }
786 894
787 page->Unlink(); 895 page->Unlink();
788 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { 896 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
789 heap()->isolate()->memory_allocator()->Free(page); 897 heap()->isolate()->memory_allocator()->Free(page);
790 } else { 898 } else {
791 heap()->QueueMemoryChunkForFree(page); 899 heap()->QueueMemoryChunkForFree(page);
792 } 900 }
793 901
794 ASSERT(Capacity() > 0); 902 ASSERT(Capacity() > 0);
795 ASSERT(Capacity() % Page::kObjectAreaSize == 0); 903 ASSERT(Capacity() % AreaSize() == 0);
796 accounting_stats_.ShrinkSpace(Page::kObjectAreaSize); 904 accounting_stats_.ShrinkSpace(AreaSize());
797 } 905 }
798 906
799 907
800 void PagedSpace::ReleaseAllUnusedPages() { 908 void PagedSpace::ReleaseAllUnusedPages() {
801 PageIterator it(this); 909 PageIterator it(this);
802 while (it.has_next()) { 910 while (it.has_next()) {
803 Page* page = it.next(); 911 Page* page = it.next();
804 if (!page->WasSwept()) { 912 if (!page->WasSwept()) {
805 if (page->LiveBytes() == 0) ReleasePage(page); 913 if (page->LiveBytes() == 0) ReleasePage(page);
806 } else { 914 } else {
807 HeapObject* obj = HeapObject::FromAddress(page->body()); 915 HeapObject* obj = HeapObject::FromAddress(page->area_start());
808 if (obj->IsFreeSpace() && 916 if (obj->IsFreeSpace() &&
809 FreeSpace::cast(obj)->size() == Page::kObjectAreaSize) { 917 FreeSpace::cast(obj)->size() == AreaSize()) {
810 // Sometimes we allocate memory from free list but don't 918 // Sometimes we allocate memory from free list but don't
811 // immediately initialize it (e.g. see PagedSpace::ReserveSpace 919 // immediately initialize it (e.g. see PagedSpace::ReserveSpace
812 // called from Heap::ReserveSpace that can cause GC before 920 // called from Heap::ReserveSpace that can cause GC before
813 // reserved space is actually initialized). 921 // reserved space is actually initialized).
814 // Thus we can't simply assume that obj represents a valid 922 // Thus we can't simply assume that obj represents a valid
815 // node still owned by a free list 923 // node still owned by a free list
816 // Instead we should verify that the page is fully covered 924 // Instead we should verify that the page is fully covered
817 // by free list items. 925 // by free list items.
818 FreeList::SizeStats sizes; 926 FreeList::SizeStats sizes;
819 free_list_.CountFreeListItems(page, &sizes); 927 free_list_.CountFreeListItems(page, &sizes);
820 if (sizes.Total() == Page::kObjectAreaSize) { 928 if (sizes.Total() == AreaSize()) {
821 ReleasePage(page); 929 ReleasePage(page);
822 } 930 }
823 } 931 }
824 } 932 }
825 } 933 }
826 heap()->FreeQueuedChunks(); 934 heap()->FreeQueuedChunks();
827 } 935 }
828 936
829 937
830 #ifdef DEBUG 938 #ifdef DEBUG
(...skipping 10 matching lines...)
841 (allocation_info_.top == allocation_info_.limit); 949 (allocation_info_.top == allocation_info_.limit);
842 PageIterator page_iterator(this); 950 PageIterator page_iterator(this);
843 while (page_iterator.has_next()) { 951 while (page_iterator.has_next()) {
844 Page* page = page_iterator.next(); 952 Page* page = page_iterator.next();
845 ASSERT(page->owner() == this); 953 ASSERT(page->owner() == this);
846 if (page == Page::FromAllocationTop(allocation_info_.top)) { 954 if (page == Page::FromAllocationTop(allocation_info_.top)) {
847 allocation_pointer_found_in_space = true; 955 allocation_pointer_found_in_space = true;
848 } 956 }
849 ASSERT(page->WasSweptPrecisely()); 957 ASSERT(page->WasSweptPrecisely());
850 HeapObjectIterator it(page, NULL); 958 HeapObjectIterator it(page, NULL);
851 Address end_of_previous_object = page->ObjectAreaStart(); 959 Address end_of_previous_object = page->area_start();
852 Address top = page->ObjectAreaEnd(); 960 Address top = page->area_end();
853 int black_size = 0; 961 int black_size = 0;
854 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { 962 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
855 ASSERT(end_of_previous_object <= object->address()); 963 ASSERT(end_of_previous_object <= object->address());
856 964
857 // The first word should be a map, and we expect all map pointers to 965 // The first word should be a map, and we expect all map pointers to
858 // be in map space. 966 // be in map space.
859 Map* map = object->map(); 967 Map* map = object->map();
860 ASSERT(map->IsMap()); 968 ASSERT(map->IsMap());
861 ASSERT(heap()->map_space()->Contains(map)); 969 ASSERT(heap()->map_space()->Contains(map));
862 970
(...skipping 191 matching lines...)
1054 // TODO(gc): Change the limit on new-space allocation to prevent this 1162 // TODO(gc): Change the limit on new-space allocation to prevent this
1055 // from happening (all such allocations should go directly to LOSpace). 1163 // from happening (all such allocations should go directly to LOSpace).
1056 return false; 1164 return false;
1057 } 1165 }
1058 if (!to_space_.AdvancePage()) { 1166 if (!to_space_.AdvancePage()) {
1059 // Failed to get a new page in to-space. 1167 // Failed to get a new page in to-space.
1060 return false; 1168 return false;
1061 } 1169 }
1062 1170
1063 // Clear remainder of current page. 1171 // Clear remainder of current page.
1064 Address limit = NewSpacePage::FromLimit(top)->body_limit(); 1172 Address limit = NewSpacePage::FromLimit(top)->area_end();
1065 if (heap()->gc_state() == Heap::SCAVENGE) { 1173 if (heap()->gc_state() == Heap::SCAVENGE) {
1066 heap()->promotion_queue()->SetNewLimit(limit); 1174 heap()->promotion_queue()->SetNewLimit(limit);
1067 heap()->promotion_queue()->ActivateGuardIfOnTheSamePage(); 1175 heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
1068 } 1176 }
1069 1177
1070 int remaining_in_page = static_cast<int>(limit - top); 1178 int remaining_in_page = static_cast<int>(limit - top);
1071 heap()->CreateFillerObjectAt(top, remaining_in_page); 1179 heap()->CreateFillerObjectAt(top, remaining_in_page);
1072 pages_used_++; 1180 pages_used_++;
1073 UpdateAllocationInfo(); 1181 UpdateAllocationInfo();
1074 1182
(...skipping 29 matching lines...)
1104 1212
1105 #ifdef DEBUG 1213 #ifdef DEBUG
1106 // We do not use the SemiSpaceIterator because verification doesn't assume 1214 // We do not use the SemiSpaceIterator because verification doesn't assume
1107 // that it works (it depends on the invariants we are checking). 1215 // that it works (it depends on the invariants we are checking).
1108 void NewSpace::Verify() { 1216 void NewSpace::Verify() {
1109 // The allocation pointer should be in the space or at the very end. 1217 // The allocation pointer should be in the space or at the very end.
1110 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 1218 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1111 1219
1112 // There should be objects packed in from the low address up to the 1220 // There should be objects packed in from the low address up to the
1113 // allocation pointer. 1221 // allocation pointer.
1114 Address current = to_space_.first_page()->body(); 1222 Address current = to_space_.first_page()->area_start();
1115 CHECK_EQ(current, to_space_.space_start()); 1223 CHECK_EQ(current, to_space_.space_start());
1116 1224
1117 while (current != top()) { 1225 while (current != top()) {
1118 if (!NewSpacePage::IsAtEnd(current)) { 1226 if (!NewSpacePage::IsAtEnd(current)) {
1119 // The allocation pointer should not be in the middle of an object. 1227 // The allocation pointer should not be in the middle of an object.
1120 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || 1228 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1121 current < top()); 1229 current < top());
1122 1230
1123 HeapObject* object = HeapObject::FromAddress(current); 1231 HeapObject* object = HeapObject::FromAddress(current);
1124 1232
(...skipping 14 matching lines...)
1139 VerifyPointersVisitor visitor; 1247 VerifyPointersVisitor visitor;
1140 int size = object->Size(); 1248 int size = object->Size();
1141 object->IterateBody(map->instance_type(), size, &visitor); 1249 object->IterateBody(map->instance_type(), size, &visitor);
1142 1250
1143 current += size; 1251 current += size;
1144 } else { 1252 } else {
1145 // At end of page, switch to next page. 1253 // At end of page, switch to next page.
1146 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page(); 1254 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1147 // Next page should be valid. 1255 // Next page should be valid.
1148 CHECK(!page->is_anchor()); 1256 CHECK(!page->is_anchor());
1149 current = page->body(); 1257 current = page->area_start();
1150 } 1258 }
1151 } 1259 }
1152 1260
1153 // Check semi-spaces. 1261 // Check semi-spaces.
1154 ASSERT_EQ(from_space_.id(), kFromSpace); 1262 ASSERT_EQ(from_space_.id(), kFromSpace);
1155 ASSERT_EQ(to_space_.id(), kToSpace); 1263 ASSERT_EQ(to_space_.id(), kToSpace);
1156 from_space_.Verify(); 1264 from_space_.Verify();
1157 to_space_.Verify(); 1265 to_space_.Verify();
1158 } 1266 }
1159 #endif 1267 #endif
(...skipping 765 matching lines...)
1925 sum += free_space->Size(); 2033 sum += free_space->Size();
1926 } 2034 }
1927 n = n->next(); 2035 n = n->next();
1928 } 2036 }
1929 return sum; 2037 return sum;
1930 } 2038 }
1931 2039
1932 2040
1933 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) { 2041 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
1934 sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p); 2042 sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
1935 if (sizes->huge_size_ < Page::kObjectAreaSize) { 2043 if (sizes->huge_size_ < p->area_size()) {
1936 sizes->small_size_ = CountFreeListItemsInList(small_list_, p); 2044 sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
1937 sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p); 2045 sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
1938 sizes->large_size_ = CountFreeListItemsInList(large_list_, p); 2046 sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
1939 } else { 2047 } else {
1940 sizes->small_size_ = 0; 2048 sizes->small_size_ = 0;
1941 sizes->medium_size_ = 0; 2049 sizes->medium_size_ = 0;
1942 sizes->large_size_ = 0; 2050 sizes->large_size_ = 0;
1943 } 2051 }
1944 } 2052 }
1945 2053
1946 2054
1947 static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) { 2055 static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
1948 intptr_t sum = 0; 2056 intptr_t sum = 0;
1949 while (*n != NULL) { 2057 while (*n != NULL) {
1950 if (Page::FromAddress((*n)->address()) == p) { 2058 if (Page::FromAddress((*n)->address()) == p) {
1951 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n); 2059 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
1952 sum += free_space->Size(); 2060 sum += free_space->Size();
1953 *n = (*n)->next(); 2061 *n = (*n)->next();
1954 } else { 2062 } else {
1955 n = (*n)->next_address(); 2063 n = (*n)->next_address();
1956 } 2064 }
1957 } 2065 }
1958 return sum; 2066 return sum;
1959 } 2067 }
1960 2068
1961 2069
1962 intptr_t FreeList::EvictFreeListItems(Page* p) { 2070 intptr_t FreeList::EvictFreeListItems(Page* p) {
1963 intptr_t sum = EvictFreeListItemsInList(&huge_list_, p); 2071 intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
1964 2072
1965 if (sum < Page::kObjectAreaSize) { 2073 if (sum < p->area_size()) {
1966 sum += EvictFreeListItemsInList(&small_list_, p) + 2074 sum += EvictFreeListItemsInList(&small_list_, p) +
1967 EvictFreeListItemsInList(&medium_list_, p) + 2075 EvictFreeListItemsInList(&medium_list_, p) +
1968 EvictFreeListItemsInList(&large_list_, p); 2076 EvictFreeListItemsInList(&large_list_, p);
1969 } 2077 }
1970 2078
1971 available_ -= static_cast<int>(sum); 2079 available_ -= static_cast<int>(sum);
1972 2080
1973 return sum; 2081 return sum;
1974 } 2082 }
1975 2083
(...skipping 101 matching lines...)
2077 } 2185 }
2078 first_unswept_page_ = Page::FromAddress(NULL); 2186 first_unswept_page_ = Page::FromAddress(NULL);
2079 unswept_free_bytes_ = 0; 2187 unswept_free_bytes_ = 0;
2080 2188
2081 // Clear the free list before a full GC---it will be rebuilt afterward. 2189 // Clear the free list before a full GC---it will be rebuilt afterward.
2082 free_list_.Reset(); 2190 free_list_.Reset();
2083 } 2191 }
2084 2192
2085 2193
2086 bool PagedSpace::ReserveSpace(int size_in_bytes) { 2194 bool PagedSpace::ReserveSpace(int size_in_bytes) {
2087 ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize); 2195 ASSERT(size_in_bytes <= AreaSize());
2088 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); 2196 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
2089 Address current_top = allocation_info_.top; 2197 Address current_top = allocation_info_.top;
2090 Address new_top = current_top + size_in_bytes; 2198 Address new_top = current_top + size_in_bytes;
2091 if (new_top <= allocation_info_.limit) return true; 2199 if (new_top <= allocation_info_.limit) return true;
2092 2200
2093 HeapObject* new_area = free_list_.Allocate(size_in_bytes); 2201 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2094 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); 2202 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2095 if (new_area == NULL) return false; 2203 if (new_area == NULL) return false;
2096 2204
2097 int old_linear_size = static_cast<int>(limit() - top()); 2205 int old_linear_size = static_cast<int>(limit() - top());
(...skipping 359 matching lines...)
2457 return Failure::RetryAfterGC(identity()); 2565 return Failure::RetryAfterGC(identity());
2458 } 2566 }
2459 2567
2460 if (Size() + object_size > max_capacity_) { 2568 if (Size() + object_size > max_capacity_) {
2461 return Failure::RetryAfterGC(identity()); 2569 return Failure::RetryAfterGC(identity());
2462 } 2570 }
2463 2571
2464 LargePage* page = heap()->isolate()->memory_allocator()-> 2572 LargePage* page = heap()->isolate()->memory_allocator()->
2465 AllocateLargePage(object_size, executable, this); 2573 AllocateLargePage(object_size, executable, this);
2466 if (page == NULL) return Failure::RetryAfterGC(identity()); 2574 if (page == NULL) return Failure::RetryAfterGC(identity());
2467 ASSERT(page->body_size() >= object_size); 2575 ASSERT(page->area_size() >= object_size);
2468 2576
2469 size_ += static_cast<int>(page->size()); 2577 size_ += static_cast<int>(page->size());
2470 objects_size_ += object_size; 2578 objects_size_ += object_size;
2471 page_count_++; 2579 page_count_++;
2472 page->set_next_page(first_page_); 2580 page->set_next_page(first_page_);
2473 first_page_ = page; 2581 first_page_ = page;
2474 2582
2475 HeapObject* object = page->GetObject(); 2583 HeapObject* object = page->GetObject();
2476 2584
2477 #ifdef DEBUG 2585 #ifdef DEBUG
(...skipping 95 matching lines...)
2573 // We do not assume that the large object iterator works, because it depends 2681 // We do not assume that the large object iterator works, because it depends
2574 // on the invariants we are checking during verification. 2682 // on the invariants we are checking during verification.
2575 void LargeObjectSpace::Verify() { 2683 void LargeObjectSpace::Verify() {
2576 for (LargePage* chunk = first_page_; 2684 for (LargePage* chunk = first_page_;
2577 chunk != NULL; 2685 chunk != NULL;
2578 chunk = chunk->next_page()) { 2686 chunk = chunk->next_page()) {
2579 // Each chunk contains an object that starts at the large object page's 2687 // Each chunk contains an object that starts at the large object page's
2580 // object area start. 2688 // object area start.
2581 HeapObject* object = chunk->GetObject(); 2689 HeapObject* object = chunk->GetObject();
2582 Page* page = Page::FromAddress(object->address()); 2690 Page* page = Page::FromAddress(object->address());
2583 ASSERT(object->address() == page->ObjectAreaStart()); 2691 ASSERT(object->address() == page->area_start());
2584 2692
2585 // The first word should be a map, and we expect all map pointers to be 2693 // The first word should be a map, and we expect all map pointers to be
2586 // in map space. 2694 // in map space.
2587 Map* map = object->map(); 2695 Map* map = object->map();
2588 ASSERT(map->IsMap()); 2696 ASSERT(map->IsMap());
2589 ASSERT(heap()->map_space()->Contains(map)); 2697 ASSERT(heap()->map_space()->Contains(map));
2590 2698
2591 // We have only code, sequential strings, external strings 2699 // We have only code, sequential strings, external strings
2592 // (sequential strings that have been morphed into external 2700 // (sequential strings that have been morphed into external
2593 // strings), fixed arrays, and byte arrays in large object space. 2701 // strings), fixed arrays, and byte arrays in large object space.
(...skipping 80 matching lines...)
2674 object->ShortPrint(); 2782 object->ShortPrint();
2675 PrintF("\n"); 2783 PrintF("\n");
2676 } 2784 }
2677 printf(" --------------------------------------\n"); 2785 printf(" --------------------------------------\n");
2678 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 2786 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
2679 } 2787 }
2680 2788
2681 #endif // DEBUG 2789 #endif // DEBUG
2682 2790
2683 } } // namespace v8::internal 2791 } } // namespace v8::internal