Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 9289047: Reduce boot-up memory use of V8. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 8 years, 11 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 13 matching lines...)
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #include "liveobjectlist-inl.h" 30 #include "liveobjectlist-inl.h"
31 #include "macro-assembler.h" 31 #include "macro-assembler.h"
32 #include "mark-compact.h" 32 #include "mark-compact.h"
33 #include "platform.h" 33 #include "platform.h"
34 #include "snapshot.h"
34 35
35 namespace v8 { 36 namespace v8 {
36 namespace internal { 37 namespace internal {
37 38
38 39
39 // ---------------------------------------------------------------------------- 40 // ----------------------------------------------------------------------------
40 // HeapObjectIterator 41 // HeapObjectIterator
41 42
42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { 43 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
43 // You can't actually iterate over the anchor page. It is not a real page, 44 // You can't actually iterate over the anchor page. It is not a real page,
(...skipping 212 matching lines...)
256 257
257 258
258 // ----------------------------------------------------------------------------- 259 // -----------------------------------------------------------------------------
259 // MemoryAllocator 260 // MemoryAllocator
260 // 261 //
261 262
262 MemoryAllocator::MemoryAllocator(Isolate* isolate) 263 MemoryAllocator::MemoryAllocator(Isolate* isolate)
263 : isolate_(isolate), 264 : isolate_(isolate),
264 capacity_(0), 265 capacity_(0),
265 capacity_executable_(0), 266 capacity_executable_(0),
266 size_(0), 267 memory_allocator_reserved_(0),
267 size_executable_(0) { 268 size_executable_(0) {
268 } 269 }
269 270
270 271
271 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { 272 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
272 capacity_ = RoundUp(capacity, Page::kPageSize); 273 capacity_ = RoundUp(capacity, Page::kPageSize);
273 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); 274 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
274 ASSERT_GE(capacity_, capacity_executable_); 275 ASSERT_GE(capacity_, capacity_executable_);
275 276
276 size_ = 0; 277 memory_allocator_reserved_ = 0;
277 size_executable_ = 0; 278 size_executable_ = 0;
278 279
279 return true; 280 return true;
280 } 281 }
281 282
282 283
283 void MemoryAllocator::TearDown() { 284 void MemoryAllocator::TearDown() {
284 // Check that spaces were torn down before MemoryAllocator. 285 // Check that spaces were torn down before MemoryAllocator.
285 ASSERT(size_ == 0); 286 CHECK_EQ(memory_allocator_reserved_, 0);
286 // TODO(gc) this will be true again when we fix FreeMemory. 287 // TODO(gc) this will be true again when we fix FreeMemory.
287 // ASSERT(size_executable_ == 0); 288 // ASSERT(size_executable_ == 0);
288 capacity_ = 0; 289 capacity_ = 0;
289 capacity_executable_ = 0; 290 capacity_executable_ = 0;
290 } 291 }
291 292
292 293
293 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, 294 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
294 Executability executable) { 295 Executability executable) {
295 // TODO(gc) make code_range part of memory allocator? 296 // TODO(gc) make code_range part of memory allocator?
296 ASSERT(reservation->IsReserved()); 297 ASSERT(reservation->IsReserved());
297 size_t size = reservation->size(); 298 size_t size = reservation->size();
298 ASSERT(size_ >= size); 299 ASSERT(memory_allocator_reserved_ >= size);
299 size_ -= size; 300 memory_allocator_reserved_ -= size;
300 301
301 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); 302 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
302 303
303 if (executable == EXECUTABLE) { 304 if (executable == EXECUTABLE) {
304 ASSERT(size_executable_ >= size); 305 ASSERT(size_executable_ >= size);
305 size_executable_ -= size; 306 size_executable_ -= size;
306 } 307 }
307 // Code which is part of the code-range does not have its own VirtualMemory. 308 // Code which is part of the code-range does not have its own VirtualMemory.
308 ASSERT(!isolate_->code_range()->contains( 309 ASSERT(!isolate_->code_range()->contains(
309 static_cast<Address>(reservation->address()))); 310 static_cast<Address>(reservation->address())));
310 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); 311 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
311 reservation->Release(); 312 reservation->Release();
312 } 313 }
313 314
314 315
315 void MemoryAllocator::FreeMemory(Address base, 316 void MemoryAllocator::FreeMemory(Address base,
316 size_t size, 317 size_t size,
317 Executability executable) { 318 Executability executable) {
318 // TODO(gc) make code_range part of memory allocator? 319 // TODO(gc) make code_range part of memory allocator?
319 ASSERT(size_ >= size); 320 ASSERT(memory_allocator_reserved_ >= size);
320 size_ -= size; 321 memory_allocator_reserved_ -= size;
321 322
322 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); 323 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
323 324
324 if (executable == EXECUTABLE) { 325 if (executable == EXECUTABLE) {
325 ASSERT(size_executable_ >= size); 326 ASSERT(size_executable_ >= size);
326 size_executable_ -= size; 327 size_executable_ -= size;
327 } 328 }
328 if (isolate_->code_range()->contains(static_cast<Address>(base))) { 329 if (isolate_->code_range()->contains(static_cast<Address>(base))) {
329 ASSERT(executable == EXECUTABLE); 330 ASSERT(executable == EXECUTABLE);
330 isolate_->code_range()->FreeRawMemory(base, size); 331 isolate_->code_range()->FreeRawMemory(base, size);
331 } else { 332 } else {
332 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); 333 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
333 bool result = VirtualMemory::ReleaseRegion(base, size); 334 bool result = VirtualMemory::ReleaseRegion(base, size);
334 USE(result); 335 USE(result);
335 ASSERT(result); 336 ASSERT(result);
336 } 337 }
337 } 338 }
338 339
339 340
340 Address MemoryAllocator::ReserveAlignedMemory(size_t size, 341 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
341 size_t alignment, 342 size_t alignment,
342 VirtualMemory* controller) { 343 VirtualMemory* controller) {
343 VirtualMemory reservation(size, alignment); 344 VirtualMemory reservation(size, alignment);
344 345
345 if (!reservation.IsReserved()) return NULL; 346 if (!reservation.IsReserved()) return NULL;
346 size_ += reservation.size(); 347 memory_allocator_reserved_ += reservation.size();
347 Address base = RoundUp(static_cast<Address>(reservation.address()), 348 Address base = RoundUp(static_cast<Address>(reservation.address()),
348 alignment); 349 alignment);
349 controller->TakeControl(&reservation); 350 controller->TakeControl(&reservation);
350 return base; 351 return base;
351 } 352 }
352 353
353 354
354 Address MemoryAllocator::AllocateAlignedMemory(size_t size, 355 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
356 size_t reserved_size,
355 size_t alignment, 357 size_t alignment,
356 Executability executable, 358 Executability executable,
357 VirtualMemory* controller) { 359 VirtualMemory* controller) {
360 ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >=
361 RoundUp(size, OS::CommitPageSize()));
358 VirtualMemory reservation; 362 VirtualMemory reservation;
359 Address base = ReserveAlignedMemory(size, alignment, &reservation); 363 Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation);
360 if (base == NULL) return NULL; 364 if (base == NULL) return NULL;
361 if (!reservation.Commit(base, 365 if (!reservation.Commit(base,
362 size, 366 size,
363 executable == EXECUTABLE)) { 367 executable == EXECUTABLE)) {
364 return NULL; 368 return NULL;
365 } 369 }
366 controller->TakeControl(&reservation); 370 controller->TakeControl(&reservation);
367 return base; 371 return base;
368 } 372 }
369 373
370 374
371 void Page::InitializeAsAnchor(PagedSpace* owner) { 375 void Page::InitializeAsAnchor(PagedSpace* owner) {
372 set_owner(owner); 376 set_owner(owner);
373 set_prev_page(this); 377 set_prev_page(this);
374 set_next_page(this); 378 set_next_page(this);
375 } 379 }
376 380
377 381
382 void Page::CommitMore(intptr_t space_needed) {
383 intptr_t reserved_page_size = reservation_.IsReserved() ?
384 reservation_.size() :
385 Page::kPageSize;
386 ASSERT(size() + space_needed <= reserved_page_size);
387 // Increase the page size by at least 64k (this also rounds up to the
388 // OS page size).
389 int expand = Min(reserved_page_size - size(),
390 RoundUp(size() + space_needed, Page::kGrowthUnit) - size());
391 ASSERT(expand <= kPageSize - size());
392 ASSERT(expand <= reserved_page_size - size());
393 Executability executable =
394 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
395 Address old_end = ObjectAreaEnd();
396 if (!VirtualMemory::CommitRegion(old_end, expand, executable)) return;
397
398 set_size(size() + expand);
399
400 PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner());
401 paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping(
402 paged_space,
403 old_end,
404 0, // No new memory was reserved.
405 expand, // New memory committed.
406 executable);
407 paged_space->IncreaseCapacity(expand);
408
409 // In spaces with alignment requirements (e.g. map space) we have to align
410 // the expanded area with the correct object alignment.
411 uintptr_t object_area_size = old_end - ObjectAreaStart();
412 uintptr_t aligned_object_area_size =
413 object_area_size - object_area_size % paged_space->ObjectAlignment();
414 if (aligned_object_area_size != object_area_size) {
415 aligned_object_area_size += paged_space->ObjectAlignment();
416 }
417 Address new_area =
418 reinterpret_cast<Address>(ObjectAreaStart() + aligned_object_area_size);
419 // In spaces with alignment requirements, this will waste the space for one
420 // object per doubling of the page size until the next GC.
421 paged_space->AddToFreeLists(old_end, new_area - old_end);
422
423 expand -= (new_area - old_end);
424
425 paged_space->AddToFreeLists(new_area, expand);
426 }
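
The reserve-then-commit pattern behind CommitMore (and ReserveAlignedMemory/AllocateAlignedMemory above) can be sketched outside V8: reserve a large aligned address range once, then commit it to real memory in growth-unit steps as allocation demands it. A minimal standalone sketch using POSIX mmap/mprotect — the sizes are hypothetical, and V8's VirtualMemory is only loosely analogous to these calls:

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

int main() {
  const size_t kReserved = 1 << 20;      // address space reserved up front
  const size_t kGrowthUnit = 64 * 1024;  // commit granularity (cf. kGrowthUnit)

  // Reserve address space without committing memory (PROT_NONE).
  void* base = mmap(NULL, kReserved, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  assert(base != MAP_FAILED);

  // Commit one growth unit at a time, as CommitMore does for a page.
  for (size_t committed = 0; committed < kReserved; committed += kGrowthUnit) {
    int rc = mprotect(static_cast<char*>(base) + committed, kGrowthUnit,
                      PROT_READ | PROT_WRITE);
    assert(rc == 0);
  }
  munmap(base, kReserved);
  return 0;
}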
427
428
378 NewSpacePage* NewSpacePage::Initialize(Heap* heap, 429 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
379 Address start, 430 Address start,
380 SemiSpace* semi_space) { 431 SemiSpace* semi_space) {
381 MemoryChunk* chunk = MemoryChunk::Initialize(heap, 432 MemoryChunk* chunk = MemoryChunk::Initialize(heap,
382 start, 433 start,
383 Page::kPageSize, 434 Page::kPageSize,
384 NOT_EXECUTABLE, 435 NOT_EXECUTABLE,
385 semi_space); 436 semi_space);
386 chunk->set_next_chunk(NULL); 437 chunk->set_next_chunk(NULL);
387 chunk->set_prev_chunk(NULL); 438 chunk->set_prev_chunk(NULL);
(...skipping 65 matching lines...)
453 ClearFlag(SCAN_ON_SCAVENGE); 504 ClearFlag(SCAN_ON_SCAVENGE);
454 } 505 }
455 next_chunk_->prev_chunk_ = prev_chunk_; 506 next_chunk_->prev_chunk_ = prev_chunk_;
456 prev_chunk_->next_chunk_ = next_chunk_; 507 prev_chunk_->next_chunk_ = next_chunk_;
457 prev_chunk_ = NULL; 508 prev_chunk_ = NULL;
458 next_chunk_ = NULL; 509 next_chunk_ = NULL;
459 } 510 }
460 511
461 512
462 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, 513 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
514 intptr_t committed_body_size,
463 Executability executable, 515 Executability executable,
464 Space* owner) { 516 Space* owner) {
465 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; 517 ASSERT(body_size >= committed_body_size);
518 size_t chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + body_size,
519 OS::CommitPageSize());
520 intptr_t committed_chunk_size =
521 committed_body_size + MemoryChunk::kObjectStartOffset;
522 committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize());
466 Heap* heap = isolate_->heap(); 523 Heap* heap = isolate_->heap();
467 Address base = NULL; 524 Address base = NULL;
468 VirtualMemory reservation; 525 VirtualMemory reservation;
469 if (executable == EXECUTABLE) { 526 if (executable == EXECUTABLE) {
470 // Check executable memory limit. 527 // Check executable memory limit.
471 if (size_executable_ + chunk_size > capacity_executable_) { 528 if (size_executable_ + chunk_size > capacity_executable_) {
472 LOG(isolate_, 529 LOG(isolate_,
473 StringEvent("MemoryAllocator::AllocateRawMemory", 530 StringEvent("MemoryAllocator::AllocateRawMemory",
474 "V8 Executable Allocation capacity exceeded")); 531 "V8 Executable Allocation capacity exceeded"));
475 return NULL; 532 return NULL;
476 } 533 }
477 534
478 // Allocate executable memory either from code range or from the 535 // Allocate executable memory either from code range or from the
479 // OS. 536 // OS.
480 if (isolate_->code_range()->exists()) { 537 if (isolate_->code_range()->exists()) {
481 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); 538 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
482 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), 539 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
483 MemoryChunk::kAlignment)); 540 MemoryChunk::kAlignment));
484 if (base == NULL) return NULL; 541 if (base == NULL) return NULL;
485 size_ += chunk_size; 542 // AllocateAlignedMemory normally updates the allocator's reserved count,
486 // Update executable memory size. 543 // but we are not using that count when we have a code range, so we
487 size_executable_ += chunk_size; 544 // update it here.
545 memory_allocator_reserved_ += chunk_size;
488 } else { 546 } else {
489 base = AllocateAlignedMemory(chunk_size, 547 base = AllocateAlignedMemory(committed_chunk_size,
548 chunk_size,
490 MemoryChunk::kAlignment, 549 MemoryChunk::kAlignment,
491 executable, 550 executable,
492 &reservation); 551 &reservation);
493 if (base == NULL) return NULL; 552 if (base == NULL) return NULL;
494 // Update executable memory size.
495 size_executable_ += reservation.size();
496 } 553 }
497 } else { 554 } else {
498 base = AllocateAlignedMemory(chunk_size, 555 base = AllocateAlignedMemory(committed_chunk_size,
556 chunk_size,
499 MemoryChunk::kAlignment, 557 MemoryChunk::kAlignment,
500 executable, 558 executable,
501 &reservation); 559 &reservation);
502 560
503 if (base == NULL) return NULL; 561 if (base == NULL) return NULL;
504 } 562 }
505 563
506 #ifdef DEBUG 564 AllocationBookkeeping(
507 ZapBlock(base, chunk_size); 565 owner, base, chunk_size, committed_chunk_size, executable);
508 #endif
509 isolate_->counters()->memory_allocated()->
510 Increment(static_cast<int>(chunk_size));
511
512 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
513 if (owner != NULL) {
514 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
515 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
516 }
517 566
518 MemoryChunk* result = MemoryChunk::Initialize(heap, 567 MemoryChunk* result = MemoryChunk::Initialize(heap,
519 base, 568 base,
520 chunk_size, 569 committed_chunk_size,
521 executable, 570 executable,
522 owner); 571 owner);
523 result->set_reserved_memory(&reservation); 572 result->set_reserved_memory(&reservation);
524 return result; 573 return result;
525 } 574 }
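
The reserved/committed split above is just two RoundUp computations over the same header-plus-body layout. A small self-contained check with illustrative numbers — the header size and commit page size here are placeholders, not the real MemoryChunk::kObjectStartOffset or OS::CommitPageSize():

#include <cstdio>

// Round x up to a multiple of m (m a power of two), as in src/utils.h.
static size_t RoundUp(size_t x, size_t m) { return (x + m - 1) & ~(m - 1); }

int main() {
  const size_t kObjectStartOffset = 256;  // placeholder chunk header size
  const size_t kCommitPageSize = 4096;    // typical OS commit granularity
  size_t body_size = 100 * 1024;          // object area we reserve for
  size_t committed_body_size = 8 * 1024;  // object area we commit initially

  size_t chunk_size = RoundUp(kObjectStartOffset + body_size, kCommitPageSize);
  size_t committed_chunk_size =
      RoundUp(kObjectStartOffset + committed_body_size, kCommitPageSize);
  // 102656 rounds up to 106496 reserved; 8448 rounds up to 12288 committed.
  printf("reserve %zu bytes, commit %zu bytes\n",
         chunk_size, committed_chunk_size);
  return 0;
}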
526 575
527 576
528 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, 577 void MemoryAllocator::AllocationBookkeeping(Space* owner,
578 Address base,
579 intptr_t reserved_chunk_size,
580 intptr_t committed_chunk_size,
581 Executability executable) {
582 if (executable == EXECUTABLE) {
583 // Update executable memory size.
584 size_executable_ += reserved_chunk_size;
585 }
586
587 #ifdef DEBUG
588 ZapBlock(base, committed_chunk_size);
589 #endif
590 isolate_->counters()->memory_allocated()->
591 Increment(static_cast<int>(committed_chunk_size));
592
593 LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size));
594 if (owner != NULL) {
595 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
596 PerformAllocationCallback(
597 space, kAllocationActionAllocate, committed_chunk_size);
598 }
599 }
600
601
602 Page* MemoryAllocator::AllocatePage(intptr_t committed_object_area_size,
603 PagedSpace* owner,
529 Executability executable) { 604 Executability executable) {
530 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); 605 ASSERT(committed_object_area_size <= Page::kObjectAreaSize);
606
607 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize,
608 committed_object_area_size,
609 executable,
610 owner);
531 611
532 if (chunk == NULL) return NULL; 612 if (chunk == NULL) return NULL;
533 613
534 return Page::Initialize(isolate_->heap(), chunk, executable, owner); 614 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
535 } 615 }
536 616
537 617
538 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, 618 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
539 Executability executable, 619 Executability executable,
540 Space* owner) { 620 Space* owner) {
541 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); 621 MemoryChunk* chunk =
622 AllocateChunk(object_size, object_size, executable, owner);
542 if (chunk == NULL) return NULL; 623 if (chunk == NULL) return NULL;
543 return LargePage::Initialize(isolate_->heap(), chunk); 624 return LargePage::Initialize(isolate_->heap(), chunk);
544 } 625 }
545 626
546 627
547 void MemoryAllocator::Free(MemoryChunk* chunk) { 628 void MemoryAllocator::Free(MemoryChunk* chunk) {
548 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); 629 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
549 if (chunk->owner() != NULL) { 630 if (chunk->owner() != NULL) {
550 ObjectSpace space = 631 ObjectSpace space =
551 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); 632 static_cast<ObjectSpace>(1 << chunk->owner()->identity());
552 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); 633 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
553 } 634 }
554 635
555 delete chunk->slots_buffer(); 636 delete chunk->slots_buffer();
556 delete chunk->skip_list(); 637 delete chunk->skip_list();
557 638
558 VirtualMemory* reservation = chunk->reserved_memory(); 639 VirtualMemory* reservation = chunk->reserved_memory();
559 if (reservation->IsReserved()) { 640 if (reservation->IsReserved()) {
560 FreeMemory(reservation, chunk->executable()); 641 FreeMemory(reservation, chunk->executable());
561 } else { 642 } else {
643 // When we do not have a reservation, it is because this allocation is
644 // part of the huge chunk of memory reserved for code on x64. In that
645 // case the size was rounded up to the page size on allocation, so we do
646 // the same now when freeing.
562 FreeMemory(chunk->address(), 647 FreeMemory(chunk->address(),
563 chunk->size(), 648 RoundUp(chunk->size(), Page::kPageSize),
564 chunk->executable()); 649 chunk->executable());
565 } 650 }
566 } 651 }
567 652
568 653
569 bool MemoryAllocator::CommitBlock(Address start, 654 bool MemoryAllocator::CommitBlock(Address start,
570 size_t size, 655 size_t size,
571 Executability executable) { 656 Executability executable) {
572 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; 657 if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
573 #ifdef DEBUG 658 #ifdef DEBUG
(...skipping 59 matching lines...)
633 memory_allocation_callbacks_.Remove(i); 718 memory_allocation_callbacks_.Remove(i);
634 return; 719 return;
635 } 720 }
636 } 721 }
637 UNREACHABLE(); 722 UNREACHABLE();
638 } 723 }
639 724
640 725
641 #ifdef DEBUG 726 #ifdef DEBUG
642 void MemoryAllocator::ReportStatistics() { 727 void MemoryAllocator::ReportStatistics() {
643 float pct = static_cast<float>(capacity_ - size_) / capacity_; 728 float pct =
729 static_cast<float>(capacity_ - memory_allocator_reserved_) / capacity_;
644 PrintF(" capacity: %" V8_PTR_PREFIX "d" 730 PrintF(" capacity: %" V8_PTR_PREFIX "d"
645 ", used: %" V8_PTR_PREFIX "d" 731 ", used: %" V8_PTR_PREFIX "d"
646 ", available: %%%d\n\n", 732 ", available: %%%d\n\n",
647 capacity_, size_, static_cast<int>(pct*100)); 733 capacity_, memory_allocator_reserved_, static_cast<int>(pct*100));
648 } 734 }
649 #endif 735 #endif
650 736
651 // ----------------------------------------------------------------------------- 737 // -----------------------------------------------------------------------------
652 // MemoryChunk implementation 738 // MemoryChunk implementation
653 739
654 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) { 740 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
655 MemoryChunk* chunk = MemoryChunk::FromAddress(address); 741 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
656 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { 742 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
657 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by); 743 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
(...skipping 58 matching lines...)
716 Address next = cur + obj->Size(); 802 Address next = cur + obj->Size();
717 if ((cur <= addr) && (addr < next)) return obj; 803 if ((cur <= addr) && (addr < next)) return obj;
718 } 804 }
719 805
720 UNREACHABLE(); 806 UNREACHABLE();
721 return Failure::Exception(); 807 return Failure::Exception();
722 } 808 }
723 809
724 bool PagedSpace::CanExpand() { 810 bool PagedSpace::CanExpand() {
725 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); 811 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
726 ASSERT(Capacity() % Page::kObjectAreaSize == 0);
727 812
728 if (Capacity() == max_capacity_) return false; 813 if (Capacity() == max_capacity_) return false;
729 814
730 ASSERT(Capacity() < max_capacity_); 815 ASSERT(Capacity() < max_capacity_);
731 816
732 // Are we going to exceed capacity for this space? 817 // Are we going to exceed capacity for this space?
733 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; 818 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
734 819
735 return true; 820 return true;
736 } 821 }
737 822
738 bool PagedSpace::Expand() { 823 bool PagedSpace::Expand(intptr_t size_in_bytes) {
739 if (!CanExpand()) return false; 824 if (!CanExpand()) return false;
740 825
826 Page* last_page = anchor_.prev_page();
827 if (last_page != &anchor_) {
828 // We have run out of linear allocation space. This may be because the
829 // most recently allocated page (stored last in the list) is a small one
830 // that starts on a page-aligned boundary but does not have a full
831 // kPageSize of committed memory. Let's commit more memory for the page.
832 intptr_t reserved_page_size = last_page->reserved_memory()->IsReserved() ?
833 last_page->reserved_memory()->size() :
834 Page::kPageSize;
835 if (last_page->size() < reserved_page_size &&
836 (reserved_page_size - last_page->size()) >= size_in_bytes &&
837 !last_page->IsEvacuationCandidate() &&
838 last_page->WasSwept()) {
839 last_page->CommitMore(size_in_bytes);
840 return true;
841 }
842 }
843
844 // We initially only commit a part of the page, but the deserialization
845 // of the initial snapshot makes the assumption that it can deserialize
846 // into linear memory of a certain size per space, so some of the spaces
847 // need to have a little more committed memory.
848 int initial =
849 Max(OS::CommitPageSize(), static_cast<intptr_t>(Page::kGrowthUnit));
850
851 ASSERT(Page::kPageSize - initial < Page::kObjectAreaSize);
852
853 intptr_t expansion_size =
854 Max(initial,
855 RoundUpToPowerOf2(MemoryChunk::kObjectStartOffset + size_in_bytes)) -
856 MemoryChunk::kObjectStartOffset;
857
741 Page* p = heap()->isolate()->memory_allocator()-> 858 Page* p = heap()->isolate()->memory_allocator()->
742 AllocatePage(this, executable()); 859 AllocatePage(expansion_size, this, executable());
743 if (p == NULL) return false; 860 if (p == NULL) return false;
744 861
745 ASSERT(Capacity() <= max_capacity_); 862 ASSERT(Capacity() <= max_capacity_);
746 863
747 p->InsertAfter(anchor_.prev_page()); 864 p->InsertAfter(anchor_.prev_page());
748 865
749 return true; 866 return true;
750 } 867 }
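
The sizing policy in Expand — commit at least one growth unit, otherwise the smallest power of two that covers the header plus the pending request — is easy to isolate. A sketch with placeholder constants standing in for MemoryChunk::kObjectStartOffset and Page::kGrowthUnit (not the real values):

#include <algorithm>
#include <cstdint>

// Round up to the next power of two (32-bit), as in V8's utils.
static uint32_t RoundUpToPowerOf2(uint32_t x) {
  x--;
  x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16;
  return x + 1;
}

// How much object area to commit for a brand-new page, given the
// allocation that triggered the expansion.
static uint32_t ExpansionSize(uint32_t size_in_bytes) {
  const uint32_t kHeader = 256;           // placeholder header size
  const uint32_t kMinCommit = 64 * 1024;  // placeholder growth unit
  return std::max(kMinCommit,
                  RoundUpToPowerOf2(kHeader + size_in_bytes)) - kHeader;
}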
751 868
752 869
(...skipping 24 matching lines...)
777 accounting_stats_.AllocateBytes(size); 894 accounting_stats_.AllocateBytes(size);
778 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size)); 895 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
779 } else { 896 } else {
780 DecreaseUnsweptFreeBytes(page); 897 DecreaseUnsweptFreeBytes(page);
781 } 898 }
782 899
783 if (Page::FromAllocationTop(allocation_info_.top) == page) { 900 if (Page::FromAllocationTop(allocation_info_.top) == page) {
784 allocation_info_.top = allocation_info_.limit = NULL; 901 allocation_info_.top = allocation_info_.limit = NULL;
785 } 902 }
786 903
904 intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart();
905
787 page->Unlink(); 906 page->Unlink();
788 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { 907 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
789 heap()->isolate()->memory_allocator()->Free(page); 908 heap()->isolate()->memory_allocator()->Free(page);
790 } else { 909 } else {
791 heap()->QueueMemoryChunkForFree(page); 910 heap()->QueueMemoryChunkForFree(page);
792 } 911 }
793 912
794 ASSERT(Capacity() > 0); 913 ASSERT(Capacity() > 0);
795 ASSERT(Capacity() % Page::kObjectAreaSize == 0); 914 accounting_stats_.ShrinkSpace(size);
796 accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
797 } 915 }
798 916
799 917
800 void PagedSpace::ReleaseAllUnusedPages() { 918 void PagedSpace::ReleaseAllUnusedPages() {
801 PageIterator it(this); 919 PageIterator it(this);
802 while (it.has_next()) { 920 while (it.has_next()) {
803 Page* page = it.next(); 921 Page* page = it.next();
804 if (!page->WasSwept()) { 922 if (!page->WasSwept()) {
805 if (page->LiveBytes() == 0) ReleasePage(page); 923 if (page->LiveBytes() == 0) ReleasePage(page);
806 } else { 924 } else {
(...skipping 857 matching lines...)
1664 // Free lists for old object spaces implementation 1782 // Free lists for old object spaces implementation
1665 1783
1666 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { 1784 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1667 ASSERT(size_in_bytes > 0); 1785 ASSERT(size_in_bytes > 0);
1668 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1786 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1669 1787
1670 // We write a map and possibly size information to the block. If the block 1788 // We write a map and possibly size information to the block. If the block
1671 // is big enough to be a FreeSpace with at least one extra word (the next 1789 // is big enough to be a FreeSpace with at least one extra word (the next
1672 // pointer), we set its map to be the free space map and its size to an 1790 // pointer), we set its map to be the free space map and its size to an
1673 // appropriate array length for the desired size from HeapObject::Size(). 1791 // appropriate array length for the desired size from HeapObject::Size().
1674 // If the block is too small (eg, one or two words), to hold both a size 1792 // If the block is too small (e.g. one or two words) to hold both a size
1675 // field and a next pointer, we give it a filler map that gives it the 1793 // field and a next pointer, we give it a filler map that gives it the
1676 // correct size. 1794 // correct size.
1677 if (size_in_bytes > FreeSpace::kHeaderSize) { 1795 if (size_in_bytes > FreeSpace::kHeaderSize) {
1678 set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); 1796 set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
1679 // Can't use FreeSpace::cast because it fails during deserialization. 1797 // Can't use FreeSpace::cast because it fails during deserialization.
1680 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); 1798 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1681 this_as_free_space->set_size(size_in_bytes); 1799 this_as_free_space->set_size(size_in_bytes);
1682 } else if (size_in_bytes == kPointerSize) { 1800 } else if (size_in_bytes == kPointerSize) {
1683 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); 1801 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
1684 } else if (size_in_bytes == 2 * kPointerSize) { 1802 } else if (size_in_bytes == 2 * kPointerSize) {
(...skipping 83 matching lines...)
1768 } else { 1886 } else {
1769 node->set_next(huge_list_); 1887 node->set_next(huge_list_);
1770 huge_list_ = node; 1888 huge_list_ = node;
1771 } 1889 }
1772 available_ += size_in_bytes; 1890 available_ += size_in_bytes;
1773 ASSERT(IsVeryLong() || available_ == SumFreeLists()); 1891 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1774 return 0; 1892 return 0;
1775 } 1893 }
1776 1894
1777 1895
1778 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { 1896 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list,
1897 int* node_size,
1898 int minimum_size) {
1779 FreeListNode* node = *list; 1899 FreeListNode* node = *list;
1780 1900
1781 if (node == NULL) return NULL; 1901 if (node == NULL) return NULL;
1782 1902
1903 ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map());
1904
1783 while (node != NULL && 1905 while (node != NULL &&
1784 Page::FromAddress(node->address())->IsEvacuationCandidate()) { 1906 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1785 available_ -= node->Size(); 1907 available_ -= node->Size();
1786 node = node->next(); 1908 node = node->next();
1787 } 1909 }
1788 1910
1789 if (node != NULL) { 1911 if (node == NULL) {
1790 *node_size = node->Size();
1791 *list = node->next();
1792 } else {
1793 *list = NULL; 1912 *list = NULL;
1913 return NULL;
1794 } 1914 }
1795 1915
1916 // Gets the size without checking the map. When we are booting we have
1917 // a FreeListNode before we have created its map.
1918 intptr_t size = reinterpret_cast<FreeSpace*>(node)->Size();
1919
1920 // We don't search the list for one that fits, preferring to look in the
1921 // list of larger nodes, but we do check the first in the list, because
1922 // if we had to expand the space or page we may have placed an entry that
1923 // was just long enough at the head of one of the lists.
1924 if (size < minimum_size) return NULL;
1925
1926 *node_size = size;
1927 available_ -= size;
1928 *list = node->next();
1929
1796 return node; 1930 return node;
1797 } 1931 }
1798 1932
1799 1933
1800 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { 1934 FreeListNode* FreeList::FindAbuttingNode(
1935 int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head) {
1936 FreeListNode* first_node = *list_head;
1937 if (first_node != NULL &&
1938 first_node->address() == limit &&
1939 reinterpret_cast<FreeSpace*>(first_node)->Size() >= size_in_bytes &&
1940 !Page::FromAddress(first_node->address())->IsEvacuationCandidate()) {
1941 FreeListNode* answer = first_node;
1942 int size = reinterpret_cast<FreeSpace*>(first_node)->Size();
1943 available_ -= size;
1944 *node_size = size;
1945 *list_head = first_node->next();
1946 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1947 return answer;
1948 }
1949 return NULL;
1950 }
1951
1952
1953 FreeListNode* FreeList::FindNodeFor(int size_in_bytes,
1954 int* node_size,
1955 Address limit) {
1801 FreeListNode* node = NULL; 1956 FreeListNode* node = NULL;
1802 1957
1803 if (size_in_bytes <= kSmallAllocationMax) { 1958 if (limit != NULL) {
1804 node = PickNodeFromList(&small_list_, node_size); 1959 // We may have a memory area at the head of the free list, which abuts the
1960 // old linear allocation area. This happens if the linear allocation area
1961 // has been shortened to allow an incremental marking step to be performed.
1962 // In that case we prefer to return the free memory area that is contiguous
1963 // with the old linear allocation area.
1964 node = FindAbuttingNode(size_in_bytes, node_size, limit, &large_list_);
1965 if (node != NULL) return node;
1966 node = FindAbuttingNode(size_in_bytes, node_size, limit, &huge_list_);
1805 if (node != NULL) return node; 1967 if (node != NULL) return node;
1806 } 1968 }
1807 1969
1808 if (size_in_bytes <= kMediumAllocationMax) { 1970 node = PickNodeFromList(&small_list_, node_size, size_in_bytes);
1809 node = PickNodeFromList(&medium_list_, node_size); 1971 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1972 if (node != NULL) return node;
1973
1974 node = PickNodeFromList(&medium_list_, node_size, size_in_bytes);
1975 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1976 if (node != NULL) return node;
1977
1978 node = PickNodeFromList(&large_list_, node_size, size_in_bytes);
1979 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1980 if (node != NULL) return node;
1981
1982 // The tricky third clause in this for statement is because
1983 // PickNodeFromList can cut nodes out of the list if they are unavailable for
1984 // new allocation (e.g. if they are on a page that has been scheduled for
1985 // evacuation).
1986 for (FreeListNode** cur = &huge_list_;
1987 *cur != NULL;
1988 cur = (*cur) == NULL ? cur : (*cur)->next_address()) {
1989 node = PickNodeFromList(cur, node_size, size_in_bytes);
1990 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1810 if (node != NULL) return node; 1991 if (node != NULL) return node;
1811 } 1992 }
1812 1993
1813 if (size_in_bytes <= kLargeAllocationMax) {
1814 node = PickNodeFromList(&large_list_, node_size);
1815 if (node != NULL) return node;
1816 }
1817
1818 for (FreeListNode** cur = &huge_list_;
1819 *cur != NULL;
1820 cur = (*cur)->next_address()) {
1821 FreeListNode* cur_node = *cur;
1822 while (cur_node != NULL &&
1823 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1824 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1825 cur_node = cur_node->next();
1826 }
1827
1828 *cur = cur_node;
1829 if (cur_node == NULL) break;
1830
1831 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1832 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1833 int size = cur_as_free_space->Size();
1834 if (size >= size_in_bytes) {
1835 // Large enough node found. Unlink it from the list.
1836 node = *cur;
1837 *node_size = size;
1838 *cur = node->next();
1839 break;
1840 }
1841 }
1842
1843 return node; 1994 return node;
1844 } 1995 }
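
The lookup strategy above — take the head of the smallest size class that could fit, checking its real size instead of searching, and only walk the huge list — can be shown with a stripped-down segregated free list. The size-class boundaries are hypothetical, and the evacuation and abutting-node checks are omitted:

#include <cstddef>

struct FreeNode {
  size_t size;
  FreeNode* next;
};

struct SegregatedFreeList {
  // Three size classes, analogous to small_list_/medium_list_/large_list_.
  FreeNode* lists[3];

  static int ClassFor(size_t size) {
    if (size <= 256) return 0;
    if (size <= 2048) return 1;
    return 2;
  }

  // Pop the head of the first class that could hold the request. Only the
  // head is inspected; if it is too small we move on to a larger class
  // rather than searching the list.
  FreeNode* Allocate(size_t size) {
    for (int c = ClassFor(size); c < 3; c++) {
      FreeNode* head = lists[c];
      if (head != NULL && head->size >= size) {
        lists[c] = head->next;
        return head;
      }
    }
    return NULL;  // caller expands the space or triggers a GC
  }
};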
1845 1996
1846 1997
1847 // Allocation on the old space free list. If it succeeds then a new linear 1998 // Allocation on the old space free list. If it succeeds then a new linear
1848 // allocation space has been set up with the top and limit of the space. If 1999 // allocation space has been set up with the top and limit of the space. If
1849 // the allocation fails then NULL is returned, and the caller can perform a GC 2000 // the allocation fails then NULL is returned, and the caller can perform a GC
1850 // or allocate a new page before retrying. 2001 // or allocate a new page before retrying.
1851 HeapObject* FreeList::Allocate(int size_in_bytes) { 2002 HeapObject* FreeList::Allocate(int size_in_bytes) {
1852 ASSERT(0 < size_in_bytes); 2003 ASSERT(0 < size_in_bytes);
1853 ASSERT(size_in_bytes <= kMaxBlockSize); 2004 ASSERT(size_in_bytes <= kMaxBlockSize);
1854 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 2005 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1855 // Don't free list allocate if there is linear space available. 2006 // Don't free list allocate if there is linear space available.
1856 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); 2007 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
1857 2008
1858 int new_node_size = 0; 2009 int new_node_size = 0;
1859 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); 2010 FreeListNode* new_node =
2011 FindNodeFor(size_in_bytes, &new_node_size, owner_->limit());
1860 if (new_node == NULL) return NULL; 2012 if (new_node == NULL) return NULL;
1861 2013
1862 available_ -= new_node_size; 2014 if (new_node->address() == owner_->limit()) {
2015 // The new freelist node we were given is an extension of the one we had
2016 // last. This commonly happens when we extend a small page by
2017 // committing more memory. In this case we just add the new node to the
2018 // linear allocation area and recurse.
2019 owner_->Allocate(new_node_size);
2020 owner_->SetTop(owner_->top(), new_node->address() + new_node_size);
2021 MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes);
2022 Object* answer;
2023 if (!allocation->ToObject(&answer)) return NULL;
2024 return HeapObject::cast(answer);
2025 }
2026
1863 ASSERT(IsVeryLong() || available_ == SumFreeLists()); 2027 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1864 2028
1865 int bytes_left = new_node_size - size_in_bytes; 2029 int bytes_left = new_node_size - size_in_bytes;
1866 ASSERT(bytes_left >= 0); 2030 ASSERT(bytes_left >= 0);
1867 2031
1868 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); 2032 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
1869 // Mark the old linear allocation area with a free space map so it can be 2033 // Mark the old linear allocation area with a free space map so it can be
1870 // skipped when scanning the heap. This also puts it back in the free list 2034 // skipped when scanning the heap. This also puts it back in the free list
1871 // if it is big enough. 2035 // if it is big enough.
1872 owner_->Free(owner_->top(), old_linear_size); 2036 if (old_linear_size != 0) {
2037 owner_->AddToFreeLists(owner_->top(), old_linear_size);
2038 }
1873 2039
1874 #ifdef DEBUG 2040 #ifdef DEBUG
1875 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { 2041 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
1876 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0); 2042 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
1877 } 2043 }
1878 #endif 2044 #endif
1879 2045
1880 owner_->heap()->incremental_marking()->OldSpaceStep( 2046 owner_->heap()->incremental_marking()->OldSpaceStep(
1881 size_in_bytes - old_linear_size); 2047 size_in_bytes - old_linear_size);
1882 2048
1883 // The old-space-step might have finished sweeping and restarted marking. 2049 // The old-space-step might have finished sweeping and restarted marking.
1884 // Verify that it did not turn the page of the new node into an evacuation 2050 // Verify that it did not turn the page of the new node into an evacuation
1885 // candidate. 2051 // candidate.
1886 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); 2052 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
1887 2053
1888 const int kThreshold = IncrementalMarking::kAllocatedThreshold; 2054 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
1889 2055
1890 // Memory in the linear allocation area is counted as allocated. We may free 2056 // Memory in the linear allocation area is counted as allocated. We may free
1891 // a little of this again immediately - see below. 2057 // a little of this again immediately - see below.
1892 owner_->Allocate(new_node_size); 2058 owner_->Allocate(new_node_size);
1893 2059
1894 if (bytes_left > kThreshold && 2060 if (bytes_left > kThreshold &&
1895 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && 2061 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
1896 FLAG_incremental_marking_steps) { 2062 FLAG_incremental_marking_steps) {
1897 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); 2063 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
1898 // We don't want to give too large linear areas to the allocator while 2064 // We don't want to give too large linear areas to the allocator while
1899 // incremental marking is going on, because we won't check again whether 2065 // incremental marking is going on, because we won't check again whether
1900 // we want to do another increment until the linear area is used up. 2066 // we want to do another increment until the linear area is used up.
1901 owner_->Free(new_node->address() + size_in_bytes + linear_size, 2067 owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size,
1902 new_node_size - size_in_bytes - linear_size); 2068 new_node_size - size_in_bytes - linear_size);
1903 owner_->SetTop(new_node->address() + size_in_bytes, 2069 owner_->SetTop(new_node->address() + size_in_bytes,
1904 new_node->address() + size_in_bytes + linear_size); 2070 new_node->address() + size_in_bytes + linear_size);
1905 } else if (bytes_left > 0) { 2071 } else if (bytes_left > 0) {
1906 // Normally we give the rest of the node to the allocator as its new 2072 // Normally we give the rest of the node to the allocator as its new
1907 // linear allocation area. 2073 // linear allocation area.
1908 owner_->SetTop(new_node->address() + size_in_bytes, 2074 owner_->SetTop(new_node->address() + size_in_bytes,
1909 new_node->address() + new_node_size); 2075 new_node->address() + new_node_size);
1910 } else { 2076 } else {
2077 ASSERT(bytes_left == 0);
1911 // TODO(gc) Try not freeing linear allocation region when bytes_left 2078 // TODO(gc) Try not freeing linear allocation region when bytes_left
1912 // are zero. 2079 // are zero.
1913 owner_->SetTop(NULL, NULL); 2080 owner_->SetTop(NULL, NULL);
1914 } 2081 }
1915 2082
1916 return new_node; 2083 return new_node;
1917 } 2084 }
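
What a successful FreeList::Allocate hands back to the space is a fresh linear allocation area delimited by top and limit; ordinary allocation then just bumps top. A minimal bump allocator over one such area, for orientation (illustrative only, not the V8 AllocationInfo API):

#include <cstddef>
#include <cstdint>

class LinearArea {
 public:
  LinearArea(uintptr_t top, uintptr_t limit) : top_(top), limit_(limit) {}

  // Bump-pointer allocation: succeeds while the area has room and returns 0
  // when the caller must refill from the free list, as the code above does.
  uintptr_t Allocate(size_t size_in_bytes) {
    if (top_ + size_in_bytes > limit_) return 0;
    uintptr_t result = top_;
    top_ += size_in_bytes;
    return result;
  }

 private:
  uintptr_t top_;
  uintptr_t limit_;
};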
1918 2085
1919 2086
1920 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { 2087 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
(...skipping 112 matching lines...)
2033 // or because we have lowered the limit in order to get periodic incremental 2200 // or because we have lowered the limit in order to get periodic incremental
2034 // marking. The most reliable way to ensure that there is linear space is 2201 // marking. The most reliable way to ensure that there is linear space is
2035 // to do the allocation, then rewind the limit. 2202 // to do the allocation, then rewind the limit.
2036 ASSERT(bytes <= InitialCapacity()); 2203 ASSERT(bytes <= InitialCapacity());
2037 MaybeObject* maybe = AllocateRaw(bytes); 2204 MaybeObject* maybe = AllocateRaw(bytes);
2038 Object* object = NULL; 2205 Object* object = NULL;
2039 if (!maybe->ToObject(&object)) return false; 2206 if (!maybe->ToObject(&object)) return false;
2040 HeapObject* allocation = HeapObject::cast(object); 2207 HeapObject* allocation = HeapObject::cast(object);
2041 Address top = allocation_info_.top; 2208 Address top = allocation_info_.top;
2042 if ((top - bytes) == allocation->address()) { 2209 if ((top - bytes) == allocation->address()) {
2043 allocation_info_.top = allocation->address(); 2210 Address new_top = allocation->address();
2211 ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart());
2212 allocation_info_.top = new_top;
2044 return true; 2213 return true;
2045 } 2214 }
2046 // There may be a borderline case here where the allocation succeeded, but 2215 // There may be a borderline case here where the allocation succeeded, but
2047 // the limit and top have moved on to a new page. In that case we try again. 2216 // the limit and top have moved on to a new page. In that case we try again.
2048 return ReserveSpace(bytes); 2217 return ReserveSpace(bytes);
2049 } 2218 }
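
The allocate-then-rewind trick in ReserveSpace generalizes to any bump allocator: perform a real allocation so all the page machinery runs, then give the bytes back by rewinding top, leaving them guaranteed available. A sketch with hypothetical names (AllocRaw standing in for AllocateRaw, top for allocation_info_.top):

#include <cstddef>
#include <cstdint>

struct BumpSpace {
  uintptr_t top;
  uintptr_t limit;

  uintptr_t AllocRaw(size_t bytes) {
    if (top + bytes > limit) return 0;  // the real code would expand or GC
    uintptr_t result = top;
    top += bytes;
    return result;
  }

  bool ReserveSpace(size_t bytes) {
    uintptr_t address = AllocRaw(bytes);
    if (address == 0) return false;
    if (top - bytes == address) {
      // The allocation came straight off the linear area: rewind top so the
      // bytes stay reserved but unused.
      top = address;
      return true;
    }
    // top moved on to a new page mid-allocation; retry, as the code above does.
    return ReserveSpace(bytes);
  }
};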
2050 2219
2051 2220
2052 void PagedSpace::PrepareForMarkCompact() { 2221 void PagedSpace::PrepareForMarkCompact() {
2053 // We don't have a linear allocation area while sweeping. It will be restored 2222 // We don't have a linear allocation area while sweeping. It will be restored
2054 // on the first allocation after the sweep. 2223 // on the first allocation after the sweep.
2055 // Mark the old linear allocation area with a free space map so it can be 2224 // Mark the old linear allocation area with a free space map so it can be
2056 // skipped when scanning the heap. 2225 // skipped when scanning the heap.
2057 int old_linear_size = static_cast<int>(limit() - top()); 2226 int old_linear_size = static_cast<int>(limit() - top());
2058 Free(top(), old_linear_size); 2227 AddToFreeLists(top(), old_linear_size);
2059 SetTop(NULL, NULL); 2228 SetTop(NULL, NULL);
2060 2229
2061 // Stop lazy sweeping and clear marking bits for unswept pages. 2230 // Stop lazy sweeping and clear marking bits for unswept pages.
2062 if (first_unswept_page_ != NULL) { 2231 if (first_unswept_page_ != NULL) {
2063 Page* p = first_unswept_page_; 2232 Page* p = first_unswept_page_;
2064 do { 2233 do {
2065 // Do not use ShouldBeSweptLazily predicate here. 2234 // Do not use ShouldBeSweptLazily predicate here.
2066 // New evacuation candidates were selected but they still have 2235 // New evacuation candidates were selected but they still have
2067 // to be swept before collection starts. 2236 // to be swept before collection starts.
2068 if (!p->WasSwept()) { 2237 if (!p->WasSwept()) {
(...skipping 22 matching lines...) Expand all
2091 if (new_top <= allocation_info_.limit) return true; 2260 if (new_top <= allocation_info_.limit) return true;
2092 2261
2093 HeapObject* new_area = free_list_.Allocate(size_in_bytes); 2262 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2094 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); 2263 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2095 if (new_area == NULL) return false; 2264 if (new_area == NULL) return false;
2096 2265
2097 int old_linear_size = static_cast<int>(limit() - top()); 2266 int old_linear_size = static_cast<int>(limit() - top());
2098 // Mark the old linear allocation area with a free space so it can be 2267 // Mark the old linear allocation area with a free space so it can be
2099 // skipped when scanning the heap. This also puts it back in the free list 2268 // skipped when scanning the heap. This also puts it back in the free list
2100 // if it is big enough. 2269 // if it is big enough.
2101 Free(top(), old_linear_size); 2270 AddToFreeLists(top(), old_linear_size);
2102 2271
2103 SetTop(new_area->address(), new_area->address() + size_in_bytes); 2272 SetTop(new_area->address(), new_area->address() + size_in_bytes);
2104 Allocate(size_in_bytes); 2273 // The AddToFreeLists call above will reduce the size of the space in the
2274 // allocation stats. We don't need to add this linear area to the size
2275 // with an Allocate(size_in_bytes) call here, because the
2276 // free_list_.Allocate() call above already accounted for this memory.
2105 return true; 2277 return true;
2106 } 2278 }
2107 2279
2108 2280
2109 // You have to call this last, since the implementation from PagedSpace 2281 // You have to call this last, since the implementation from PagedSpace
2110 // doesn't know that memory was 'promised' to large object space. 2282 // doesn't know that memory was 'promised' to large object space.
2111 bool LargeObjectSpace::ReserveSpace(int bytes) { 2283 bool LargeObjectSpace::ReserveSpace(int bytes) {
2112 return heap()->OldGenerationSpaceAvailable() >= bytes; 2284 return heap()->OldGenerationSpaceAvailable() >= bytes;
2113 } 2285 }
2114 2286
(...skipping 60 matching lines...)
2175 2347
2176 // Free list allocation failed and there is no next page. Fail if we have 2348 // Free list allocation failed and there is no next page. Fail if we have
2177 // hit the old generation size limit that should cause a garbage 2349 // hit the old generation size limit that should cause a garbage
2178 // collection. 2350 // collection.
2179 if (!heap()->always_allocate() && 2351 if (!heap()->always_allocate() &&
2180 heap()->OldGenerationAllocationLimitReached()) { 2352 heap()->OldGenerationAllocationLimitReached()) {
2181 return NULL; 2353 return NULL;
2182 } 2354 }
2183 2355
2184 // Try to expand the space and allocate in the new next page. 2356 // Try to expand the space and allocate in the new next page.
2185 if (Expand()) { 2357 if (Expand(size_in_bytes)) {
2186 return free_list_.Allocate(size_in_bytes); 2358 return free_list_.Allocate(size_in_bytes);
2187 } 2359 }
2188 2360
2189 // Last ditch, sweep all the remaining pages to try to find space. This may 2361 // Last ditch, sweep all the remaining pages to try to find space. This may
2190 // cause a pause. 2362 // cause a pause.
2191 if (!IsSweepingComplete()) { 2363 if (!IsSweepingComplete()) {
2192 AdvanceSweeper(kMaxInt); 2364 AdvanceSweeper(kMaxInt);
2193 2365
2194 // Retry the free list allocation. 2366 // Retry the free list allocation.
2195 HeapObject* object = free_list_.Allocate(size_in_bytes); 2367 HeapObject* object = free_list_.Allocate(size_in_bytes);
(...skipping 340 matching lines...)
2536 if (previous == NULL) { 2708 if (previous == NULL) {
2537 first_page_ = current; 2709 first_page_ = current;
2538 } else { 2710 } else {
2539 previous->set_next_page(current); 2711 previous->set_next_page(current);
2540 } 2712 }
2541 2713
2542 // Free the chunk. 2714 // Free the chunk.
2543 heap()->mark_compact_collector()->ReportDeleteIfNeeded( 2715 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2544 object, heap()->isolate()); 2716 object, heap()->isolate());
2545 size_ -= static_cast<int>(page->size()); 2717 size_ -= static_cast<int>(page->size());
2718 ASSERT(size_ >= 0);
2546 objects_size_ -= object->Size(); 2719 objects_size_ -= object->Size();
2547 page_count_--; 2720 page_count_--;
2548 2721
2549 if (is_pointer_object) { 2722 if (is_pointer_object) {
2550 heap()->QueueMemoryChunkForFree(page); 2723 heap()->QueueMemoryChunkForFree(page);
2551 } else { 2724 } else {
2552 heap()->isolate()->memory_allocator()->Free(page); 2725 heap()->isolate()->memory_allocator()->Free(page);
2553 } 2726 }
2554 } 2727 }
2555 } 2728 }
(...skipping 118 matching lines...)
2674 object->ShortPrint(); 2847 object->ShortPrint();
2675 PrintF("\n"); 2848 PrintF("\n");
2676 } 2849 }
2677 printf(" --------------------------------------\n"); 2850 printf(" --------------------------------------\n");
2678 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 2851 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
2679 } 2852 }
2680 2853
2681 #endif // DEBUG 2854 #endif // DEBUG
2682 2855
2683 } } // namespace v8::internal 2856 } } // namespace v8::internal
