Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 9233050: Reduce memory use immediately after boot. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 8 years, 10 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 13 matching lines...)
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #include "liveobjectlist-inl.h" 30 #include "liveobjectlist-inl.h"
31 #include "macro-assembler.h" 31 #include "macro-assembler.h"
32 #include "mark-compact.h" 32 #include "mark-compact.h"
33 #include "platform.h" 33 #include "platform.h"
34 #include "snapshot.h"
34 35
35 namespace v8 { 36 namespace v8 {
36 namespace internal { 37 namespace internal {
37 38
38 39
39 // ---------------------------------------------------------------------------- 40 // ----------------------------------------------------------------------------
40 // HeapObjectIterator 41 // HeapObjectIterator
41 42
42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { 43 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
43 // You can't actually iterate over the anchor page. It is not a real page, 44 // You can't actually iterate over the anchor page. It is not a real page,
(...skipping 212 matching lines...)
256 257
257 258
258 // ----------------------------------------------------------------------------- 259 // -----------------------------------------------------------------------------
259 // MemoryAllocator 260 // MemoryAllocator
260 // 261 //
261 262
262 MemoryAllocator::MemoryAllocator(Isolate* isolate) 263 MemoryAllocator::MemoryAllocator(Isolate* isolate)
263 : isolate_(isolate), 264 : isolate_(isolate),
264 capacity_(0), 265 capacity_(0),
265 capacity_executable_(0), 266 capacity_executable_(0),
266 size_(0), 267 memory_allocator_reserved_(0),
267 size_executable_(0) { 268 size_executable_(0) {
268 } 269 }
269 270
270 271
271 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { 272 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
272 capacity_ = RoundUp(capacity, Page::kPageSize); 273 capacity_ = RoundUp(capacity, Page::kPageSize);
273 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); 274 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
274 ASSERT_GE(capacity_, capacity_executable_); 275 ASSERT_GE(capacity_, capacity_executable_);
275 276
276 size_ = 0; 277 memory_allocator_reserved_ = 0;
277 size_executable_ = 0; 278 size_executable_ = 0;
278 279
279 return true; 280 return true;
280 } 281 }
281 282
282 283
283 void MemoryAllocator::TearDown() { 284 void MemoryAllocator::TearDown() {
284 // Check that spaces were torn down before MemoryAllocator. 285 // Check that spaces were torn down before MemoryAllocator.
285 ASSERT(size_ == 0); 286 CHECK_EQ(memory_allocator_reserved_, static_cast<intptr_t>(0));
Erik Corry 2012/01/31 10:44:44 Cast added here and a few other places to make the
286 // TODO(gc) this will be true again when we fix FreeMemory. 287 // TODO(gc) this will be true again when we fix FreeMemory.
287 // ASSERT(size_executable_ == 0); 288 // ASSERT(size_executable_ == 0);
288 capacity_ = 0; 289 capacity_ = 0;
289 capacity_executable_ = 0; 290 capacity_executable_ = 0;
290 } 291 }
291 292
292 293
293 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, 294 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
294 Executability executable) { 295 Executability executable) {
295 // TODO(gc) make code_range part of memory allocator? 296 // TODO(gc) make code_range part of memory allocator?
296 ASSERT(reservation->IsReserved()); 297 ASSERT(reservation->IsReserved());
297 size_t size = reservation->size(); 298 size_t size = reservation->size();
298 ASSERT(size_ >= size); 299 ASSERT(memory_allocator_reserved_ >= size);
299 size_ -= size; 300 memory_allocator_reserved_ -= size;
300 301
301 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); 302 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
302 303
303 if (executable == EXECUTABLE) { 304 if (executable == EXECUTABLE) {
304 ASSERT(size_executable_ >= size); 305 ASSERT(size_executable_ >= size);
305 size_executable_ -= size; 306 size_executable_ -= size;
306 } 307 }
307 // Code which is part of the code-range does not have its own VirtualMemory. 308 // Code which is part of the code-range does not have its own VirtualMemory.
308 ASSERT(!isolate_->code_range()->contains( 309 ASSERT(!isolate_->code_range()->contains(
309 static_cast<Address>(reservation->address()))); 310 static_cast<Address>(reservation->address())));
310 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); 311 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
311 reservation->Release(); 312 reservation->Release();
312 } 313 }
313 314
314 315
315 void MemoryAllocator::FreeMemory(Address base, 316 void MemoryAllocator::FreeMemory(Address base,
316 size_t size, 317 size_t size,
317 Executability executable) { 318 Executability executable) {
318 // TODO(gc) make code_range part of memory allocator? 319 // TODO(gc) make code_range part of memory allocator?
319 ASSERT(size_ >= size); 320 ASSERT(memory_allocator_reserved_ >= size);
320 size_ -= size; 321 memory_allocator_reserved_ -= size;
321 322
322 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); 323 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
323 324
324 if (executable == EXECUTABLE) { 325 if (executable == EXECUTABLE) {
325 ASSERT(size_executable_ >= size); 326 ASSERT(size_executable_ >= size);
326 size_executable_ -= size; 327 size_executable_ -= size;
327 } 328 }
328 if (isolate_->code_range()->contains(static_cast<Address>(base))) { 329 if (isolate_->code_range()->contains(static_cast<Address>(base))) {
329 ASSERT(executable == EXECUTABLE); 330 ASSERT(executable == EXECUTABLE);
330 isolate_->code_range()->FreeRawMemory(base, size); 331 isolate_->code_range()->FreeRawMemory(base, size);
331 } else { 332 } else {
332 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); 333 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
333 bool result = VirtualMemory::ReleaseRegion(base, size); 334 bool result = VirtualMemory::ReleaseRegion(base, size);
334 USE(result); 335 USE(result);
335 ASSERT(result); 336 ASSERT(result);
336 } 337 }
337 } 338 }
338 339
339 340
340 Address MemoryAllocator::ReserveAlignedMemory(size_t size, 341 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
341 size_t alignment, 342 size_t alignment,
342 VirtualMemory* controller) { 343 VirtualMemory* controller) {
343 VirtualMemory reservation(size, alignment); 344 VirtualMemory reservation(size, alignment);
344 345
345 if (!reservation.IsReserved()) return NULL; 346 if (!reservation.IsReserved()) return NULL;
346 size_ += reservation.size(); 347 memory_allocator_reserved_ += reservation.size();
347 Address base = RoundUp(static_cast<Address>(reservation.address()), 348 Address base = RoundUp(static_cast<Address>(reservation.address()),
348 alignment); 349 alignment);
349 controller->TakeControl(&reservation); 350 controller->TakeControl(&reservation);
350 return base; 351 return base;
351 } 352 }
352 353
353 354
354 Address MemoryAllocator::AllocateAlignedMemory(size_t size, 355 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
356 size_t reserved_size,
355 size_t alignment, 357 size_t alignment,
356 Executability executable, 358 Executability executable,
357 VirtualMemory* controller) { 359 VirtualMemory* controller) {
360 ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >=
361 RoundUp(size, OS::CommitPageSize()));
358 VirtualMemory reservation; 362 VirtualMemory reservation;
359 Address base = ReserveAlignedMemory(size, alignment, &reservation); 363 Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation);
360 if (base == NULL) return NULL; 364 if (base == NULL) return NULL;
361 if (!reservation.Commit(base, 365 if (!reservation.Commit(base,
362 size, 366 size,
363 executable == EXECUTABLE)) { 367 executable == EXECUTABLE)) {
364 return NULL; 368 return NULL;
365 } 369 }
366 controller->TakeControl(&reservation); 370 controller->TakeControl(&reservation);
367 return base; 371 return base;
368 } 372 }
369 373
370 374
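The new reserved_size parameter above separates how much address space is reserved from how much is committed up front. As a hedged, stand-alone illustration of that reserve-then-commit pattern (POSIX-only; the constants and names below are ours, not V8's):

```cpp
// Reserve a large region with no access rights, then commit only a prefix.
#include <sys/mman.h>
#include <cassert>
#include <cstdio>

int main() {
  const size_t kReserved = 1 << 20;    // 1 MB of address space reserved
  const size_t kCommitted = 64 << 10;  // only the first 64 KB committed
  void* base = mmap(NULL, kReserved, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);
  // "Committing" here means granting access to a prefix of the reservation.
  if (mprotect(base, kCommitted, PROT_READ | PROT_WRITE) != 0) return 1;
  static_cast<char*>(base)[0] = 1;     // committed memory is now usable
  printf("reserved %zu bytes, committed %zu\n", kReserved, kCommitted);
  munmap(base, kReserved);
  return 0;
}
```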
371 void Page::InitializeAsAnchor(PagedSpace* owner) { 375 void Page::InitializeAsAnchor(PagedSpace* owner) {
372 set_owner(owner); 376 set_owner(owner);
373 set_prev_page(this); 377 set_prev_page(this);
374 set_next_page(this); 378 set_next_page(this);
375 } 379 }
376 380
377 381
382 void Page::CommitMore(intptr_t space_needed) {
383 intptr_t reserved_page_size = reservation_.IsReserved() ?
384 reservation_.size() :
385 Page::kPageSize;
386 ASSERT(size() + space_needed <= reserved_page_size);
387 // Increase the page size by at least 64k (this also rounds to OS page
388 // size).
389 intptr_t expand =
390 Min(reserved_page_size - size(),
391 RoundUp(size() + space_needed, Page::kGrowthUnit) - size());
392 ASSERT(expand <= kPageSize - size());
393 ASSERT(expand <= reserved_page_size - size());
394 Executability executable =
395 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
396 Address old_end = ObjectAreaEnd();
397 if (!VirtualMemory::CommitRegion(old_end, expand, executable)) return;
398
399 set_size(size() + expand);
400
401 PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner());
402 paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping(
403 paged_space,
404 old_end,
405 0, // No new memory was reserved.
406 expand, // New memory committed.
407 executable);
408 paged_space->IncreaseCapacity(expand);
409
410 // In spaces with alignment requirements (e.g. map space) we have to align
411 // the expanded area with the correct object alignment.
412 Address new_area = RoundUpToObjectAlignment(old_end);
413
414 // In spaces with alignment requirements, this will waste the space for one
415 // object per doubling of the page size until the next GC.
416 paged_space->AddToFreeLists(old_end, new_area - old_end);
417
418 expand -= (new_area - old_end);
419
420 paged_space->AddToFreeLists(new_area, expand);
421 }
422
423
424 Address Page::RoundUpToObjectAlignment(Address a) {
425 PagedSpace* paged_owner = reinterpret_cast<PagedSpace*>(owner());
426 intptr_t off = a - ObjectAreaStart();
427 intptr_t modulus = off % paged_owner->ObjectAlignment();
428 if (modulus == 0) return a;
429 return a - modulus + paged_owner->ObjectAlignment();
430 }
431
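Both CommitMore's growth computation and RoundUpToObjectAlignment above are plain integer arithmetic. A minimal sketch with stand-in constants (the growth unit, reservation size, and object alignment are illustrative values, not V8's):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

static intptr_t RoundUp(intptr_t x, intptr_t multiple) {
  return (x + multiple - 1) / multiple * multiple;
}

int main() {
  const intptr_t kGrowthUnit = 64 * 1024;       // grow in >= 64k steps
  const intptr_t reserved_page_size = 1 << 20;  // full reservation for page
  intptr_t size = 96 * 1024;                    // currently committed
  intptr_t space_needed = 8 * 1024;             // allocation that didn't fit

  // Commit at least a growth unit more, but never past the reservation.
  intptr_t expand = std::min(
      reserved_page_size - size,
      RoundUp(size + space_needed, kGrowthUnit) - size);
  printf("expand by %ld\n", static_cast<long>(expand));  // 32768 here

  // Alignment fix-up as in RoundUpToObjectAlignment: round an offset up
  // to the next multiple of the space's object alignment.
  intptr_t alignment = 32;                      // stand-in object alignment
  intptr_t off = size + expand;                 // offset of the new end
  intptr_t modulus = off % alignment;
  intptr_t aligned = (modulus == 0) ? off : off - modulus + alignment;
  printf("aligned offset %ld\n", static_cast<long>(aligned));
  return 0;
}
```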
432
378 NewSpacePage* NewSpacePage::Initialize(Heap* heap, 433 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
379 Address start, 434 Address start,
380 SemiSpace* semi_space) { 435 SemiSpace* semi_space) {
381 MemoryChunk* chunk = MemoryChunk::Initialize(heap, 436 MemoryChunk* chunk = MemoryChunk::Initialize(heap,
382 start, 437 start,
383 Page::kPageSize, 438 Page::kPageSize,
384 NOT_EXECUTABLE, 439 NOT_EXECUTABLE,
385 semi_space); 440 semi_space);
386 chunk->set_next_chunk(NULL); 441 chunk->set_next_chunk(NULL);
387 chunk->set_prev_chunk(NULL); 442 chunk->set_prev_chunk(NULL);
(...skipping 65 matching lines...)
453 ClearFlag(SCAN_ON_SCAVENGE); 508 ClearFlag(SCAN_ON_SCAVENGE);
454 } 509 }
455 next_chunk_->prev_chunk_ = prev_chunk_; 510 next_chunk_->prev_chunk_ = prev_chunk_;
456 prev_chunk_->next_chunk_ = next_chunk_; 511 prev_chunk_->next_chunk_ = next_chunk_;
457 prev_chunk_ = NULL; 512 prev_chunk_ = NULL;
458 next_chunk_ = NULL; 513 next_chunk_ = NULL;
459 } 514 }
460 515
461 516
462 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, 517 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
518 intptr_t committed_body_size,
463 Executability executable, 519 Executability executable,
464 Space* owner) { 520 Space* owner) {
465 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; 521 ASSERT(body_size >= committed_body_size);
522 size_t chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + body_size,
523 OS::CommitPageSize());
524 intptr_t committed_chunk_size =
525 committed_body_size + MemoryChunk::kObjectStartOffset;
526 committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize());
466 Heap* heap = isolate_->heap(); 527 Heap* heap = isolate_->heap();
467 Address base = NULL; 528 Address base = NULL;
468 VirtualMemory reservation; 529 VirtualMemory reservation;
469 if (executable == EXECUTABLE) { 530 if (executable == EXECUTABLE) {
470 // Check executable memory limit. 531 // Check executable memory limit.
471 if (size_executable_ + chunk_size > capacity_executable_) { 532 if (size_executable_ + chunk_size > capacity_executable_) {
472 LOG(isolate_, 533 LOG(isolate_,
473 StringEvent("MemoryAllocator::AllocateRawMemory", 534 StringEvent("MemoryAllocator::AllocateRawMemory",
474 "V8 Executable Allocation capacity exceeded")); 535 "V8 Executable Allocation capacity exceeded"));
475 return NULL; 536 return NULL;
476 } 537 }
477 538
478 // Allocate executable memory either from code range or from the 539 // Allocate executable memory either from code range or from the
479 // OS. 540 // OS.
480 if (isolate_->code_range()->exists()) { 541 if (isolate_->code_range()->exists()) {
481 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); 542 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
482 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), 543 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
483 MemoryChunk::kAlignment)); 544 MemoryChunk::kAlignment));
484 if (base == NULL) return NULL; 545 if (base == NULL) return NULL;
485 size_ += chunk_size; 546 // The AllocateAlignedMemory method will update the memory allocator
486 // Update executable memory size. 547 // memory used, but we are not using that if we have a code range, so
487 size_executable_ += chunk_size; 548 // we update it here.
549 memory_allocator_reserved_ += chunk_size;
488 } else { 550 } else {
489 base = AllocateAlignedMemory(chunk_size, 551 base = AllocateAlignedMemory(committed_chunk_size,
552 chunk_size,
490 MemoryChunk::kAlignment, 553 MemoryChunk::kAlignment,
491 executable, 554 executable,
492 &reservation); 555 &reservation);
493 if (base == NULL) return NULL; 556 if (base == NULL) return NULL;
494 // Update executable memory size.
495 size_executable_ += reservation.size();
496 } 557 }
497 } else { 558 } else {
498 base = AllocateAlignedMemory(chunk_size, 559 base = AllocateAlignedMemory(committed_chunk_size,
560 chunk_size,
499 MemoryChunk::kAlignment, 561 MemoryChunk::kAlignment,
500 executable, 562 executable,
501 &reservation); 563 &reservation);
502 564
503 if (base == NULL) return NULL; 565 if (base == NULL) return NULL;
504 } 566 }
505 567
506 #ifdef DEBUG 568 AllocationBookkeeping(
507 ZapBlock(base, chunk_size); 569 owner, base, chunk_size, committed_chunk_size, executable);
508 #endif
509 isolate_->counters()->memory_allocated()->
510 Increment(static_cast<int>(chunk_size));
511
512 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
513 if (owner != NULL) {
514 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
515 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
516 }
517 570
518 MemoryChunk* result = MemoryChunk::Initialize(heap, 571 MemoryChunk* result = MemoryChunk::Initialize(heap,
519 base, 572 base,
520 chunk_size, 573 committed_chunk_size,
521 executable, 574 executable,
522 owner); 575 owner);
523 result->set_reserved_memory(&reservation); 576 result->set_reserved_memory(&reservation);
524 return result; 577 return result;
525 } 578 }
526 579
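AllocateChunk now tracks two sizes: the full chunk size that is reserved and the smaller size that is committed up front, both rounded to the OS commit granularity. A small sketch of that computation, with illustrative stand-ins for MemoryChunk::kObjectStartOffset and OS::CommitPageSize():

```cpp
#include <cstdint>
#include <cstdio>

static intptr_t RoundUp(intptr_t x, intptr_t multiple) {
  return (x + multiple - 1) / multiple * multiple;
}

int main() {
  const intptr_t kObjectStartOffset = 256;   // stand-in chunk header size
  const intptr_t kCommitPageSize = 4096;     // stand-in commit granularity
  intptr_t body_size = 512 * 1024;           // full object area requested
  intptr_t committed_body_size = 16 * 1024;  // object area committed up front

  intptr_t chunk_size =
      RoundUp(kObjectStartOffset + body_size, kCommitPageSize);
  intptr_t committed_chunk_size =
      RoundUp(kObjectStartOffset + committed_body_size, kCommitPageSize);
  printf("reserve %ld bytes, commit %ld bytes\n",
         static_cast<long>(chunk_size),
         static_cast<long>(committed_chunk_size));
  return 0;
}
```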
527 580
528 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, 581 void MemoryAllocator::AllocationBookkeeping(Space* owner,
582 Address base,
583 intptr_t reserved_chunk_size,
584 intptr_t committed_chunk_size,
585 Executability executable) {
586 if (executable == EXECUTABLE) {
587 // Update executable memory size.
588 size_executable_ += reserved_chunk_size;
589 }
590
591 #ifdef DEBUG
592 ZapBlock(base, committed_chunk_size);
593 #endif
594 isolate_->counters()->memory_allocated()->
595 Increment(static_cast<int>(committed_chunk_size));
596
597 LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size));
598 if (owner != NULL) {
599 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
600 PerformAllocationCallback(
601 space, kAllocationActionAllocate, committed_chunk_size);
602 }
603 }
604
605
606 Page* MemoryAllocator::AllocatePage(intptr_t committed_object_area_size,
607 PagedSpace* owner,
529 Executability executable) { 608 Executability executable) {
530 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); 609 ASSERT(committed_object_area_size <= Page::kObjectAreaSize);
610
611 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize,
612 committed_object_area_size,
613 executable,
614 owner);
531 615
532 if (chunk == NULL) return NULL; 616 if (chunk == NULL) return NULL;
533 617
534 return Page::Initialize(isolate_->heap(), chunk, executable, owner); 618 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
535 } 619 }
536 620
537 621
538 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, 622 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
539 Executability executable, 623 Executability executable,
540 Space* owner) { 624 Space* owner) {
541 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); 625 MemoryChunk* chunk =
626 AllocateChunk(object_size, object_size, executable, owner);
542 if (chunk == NULL) return NULL; 627 if (chunk == NULL) return NULL;
543 return LargePage::Initialize(isolate_->heap(), chunk); 628 return LargePage::Initialize(isolate_->heap(), chunk);
544 } 629 }
545 630
546 631
547 void MemoryAllocator::Free(MemoryChunk* chunk) { 632 void MemoryAllocator::Free(MemoryChunk* chunk) {
548 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); 633 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
549 if (chunk->owner() != NULL) { 634 if (chunk->owner() != NULL) {
550 ObjectSpace space = 635 ObjectSpace space =
551 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); 636 static_cast<ObjectSpace>(1 << chunk->owner()->identity());
552 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); 637 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
553 } 638 }
554 639
555 delete chunk->slots_buffer(); 640 delete chunk->slots_buffer();
556 delete chunk->skip_list(); 641 delete chunk->skip_list();
557 642
558 VirtualMemory* reservation = chunk->reserved_memory(); 643 VirtualMemory* reservation = chunk->reserved_memory();
559 if (reservation->IsReserved()) { 644 if (reservation->IsReserved()) {
560 FreeMemory(reservation, chunk->executable()); 645 FreeMemory(reservation, chunk->executable());
561 } else { 646 } else {
647 // When we do not have a reservation that is because this allocation
648 // is part of the huge reserved chunk of memory reserved for code on
649 // x64. In that case the size was rounded up to the page size on
650 // allocation so we do the same now when freeing.
562 FreeMemory(chunk->address(), 651 FreeMemory(chunk->address(),
563 chunk->size(), 652 RoundUp(chunk->size(), Page::kPageSize),
564 chunk->executable()); 653 chunk->executable());
565 } 654 }
566 } 655 }
567 656
568 657
569 bool MemoryAllocator::CommitBlock(Address start, 658 bool MemoryAllocator::CommitBlock(Address start,
570 size_t size, 659 size_t size,
571 Executability executable) { 660 Executability executable) {
572 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; 661 if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
573 #ifdef DEBUG 662 #ifdef DEBUG
(...skipping 59 matching lines...)
633 memory_allocation_callbacks_.Remove(i); 722 memory_allocation_callbacks_.Remove(i);
634 return; 723 return;
635 } 724 }
636 } 725 }
637 UNREACHABLE(); 726 UNREACHABLE();
638 } 727 }
639 728
640 729
641 #ifdef DEBUG 730 #ifdef DEBUG
642 void MemoryAllocator::ReportStatistics() { 731 void MemoryAllocator::ReportStatistics() {
643 float pct = static_cast<float>(capacity_ - size_) / capacity_; 732 float pct =
733 static_cast<float>(capacity_ - memory_allocator_reserved_) / capacity_;
644 PrintF(" capacity: %" V8_PTR_PREFIX "d" 734 PrintF(" capacity: %" V8_PTR_PREFIX "d"
645 ", used: %" V8_PTR_PREFIX "d" 735 ", used: %" V8_PTR_PREFIX "d"
646 ", available: %%%d\n\n", 736 ", available: %%%d\n\n",
647 capacity_, size_, static_cast<int>(pct*100)); 737 capacity_, memory_allocator_reserved_, static_cast<int>(pct*100));
648 } 738 }
649 #endif 739 #endif
650 740
651 // ----------------------------------------------------------------------------- 741 // -----------------------------------------------------------------------------
652 // MemoryChunk implementation 742 // MemoryChunk implementation
653 743
654 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) { 744 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
655 MemoryChunk* chunk = MemoryChunk::FromAddress(address); 745 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
656 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { 746 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
657 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by); 747 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
(...skipping 58 matching lines...)
716 Address next = cur + obj->Size(); 806 Address next = cur + obj->Size();
717 if ((cur <= addr) && (addr < next)) return obj; 807 if ((cur <= addr) && (addr < next)) return obj;
718 } 808 }
719 809
720 UNREACHABLE(); 810 UNREACHABLE();
721 return Failure::Exception(); 811 return Failure::Exception();
722 } 812 }
723 813
724 bool PagedSpace::CanExpand() { 814 bool PagedSpace::CanExpand() {
725 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); 815 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
726 ASSERT(Capacity() % Page::kObjectAreaSize == 0);
727 816
728 if (Capacity() == max_capacity_) return false; 817 if (Capacity() == max_capacity_) return false;
729 818
730 ASSERT(Capacity() < max_capacity_); 819 ASSERT(Capacity() < max_capacity_);
731 820
732 // Are we going to exceed capacity for this space? 821 // Are we going to exceed capacity for this space?
733 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; 822 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
734 823
735 return true; 824 return true;
736 } 825 }
737 826
738 bool PagedSpace::Expand() { 827 bool PagedSpace::Expand(intptr_t size_in_bytes) {
739 if (!CanExpand()) return false; 828 if (!CanExpand()) return false;
740 829
830 Page* last_page = anchor_.prev_page();
831 if (last_page != &anchor_) {
832 // We have run out of linear allocation space. This may be because the
833 // most recently allocated page (stored last in the list) is a small one,
834 // that starts on a page-aligned boundary but does not have a full
835 // kPageSize of committed memory. Let's commit more memory for the page.
836 intptr_t reserved_page_size = last_page->reserved_memory()->IsReserved() ?
837 last_page->reserved_memory()->size() :
838 Page::kPageSize;
839 if (last_page->size() < reserved_page_size &&
840 (reserved_page_size - last_page->size()) >= size_in_bytes &&
841 !last_page->IsEvacuationCandidate() &&
842 last_page->WasSwept()) {
843 last_page->CommitMore(size_in_bytes);
844 return true;
845 }
846 }
847
848 // We initially only commit a part of the page. The deserialization of the
Erik Corry 2012/01/31 10:44:44 I updated the comment.
849 // boot snapshot relies on the fact that the allocation area is linear, but
850 // that is assured, as this page will be expanded as needed.
851 int initial = static_cast<int>(
852 Max(OS::CommitPageSize(), static_cast<intptr_t>(Page::kGrowthUnit)));
853
854 ASSERT(Page::kPageSize - initial < Page::kObjectAreaSize);
855
856 intptr_t expansion_size =
857 Max(initial,
858 RoundUpToPowerOf2(MemoryChunk::kObjectStartOffset + size_in_bytes)) -
859 MemoryChunk::kObjectStartOffset;
860
741 Page* p = heap()->isolate()->memory_allocator()-> 861 Page* p = heap()->isolate()->memory_allocator()->
742 AllocatePage(this, executable()); 862 AllocatePage(expansion_size, this, executable());
743 if (p == NULL) return false; 863 if (p == NULL) return false;
744 864
745 ASSERT(Capacity() <= max_capacity_); 865 ASSERT(Capacity() <= max_capacity_);
746 866
747 p->InsertAfter(anchor_.prev_page()); 867 p->InsertAfter(anchor_.prev_page());
748 868
749 return true; 869 return true;
750 } 870 }
751 871
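The initial commit size chosen in Expand rounds the request (plus chunk header) up to a power of two, but never commits less than one growth unit. A sketch of the arithmetic, with RoundUpToPowerOf2 written out and illustrative constants:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

static intptr_t RoundUpToPowerOf2(intptr_t x) {
  intptr_t p = 1;
  while (p < x) p <<= 1;
  return p;
}

int main() {
  const intptr_t kObjectStartOffset = 256;  // stand-in chunk header size
  const intptr_t kGrowthUnit = 64 * 1024;   // stand-in for Page::kGrowthUnit
  const intptr_t kCommitPageSize = 4096;    // stand-in commit page size
  intptr_t size_in_bytes = 40 * 1024;       // allocation forcing expansion

  intptr_t initial = std::max(kCommitPageSize, kGrowthUnit);
  intptr_t expansion_size =
      std::max(initial,
               RoundUpToPowerOf2(kObjectStartOffset + size_in_bytes)) -
      kObjectStartOffset;
  printf("commit %ld bytes of object area initially\n",
         static_cast<long>(expansion_size));  // 65280 for these inputs
  return 0;
}
```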
752 872
(...skipping 24 matching lines...)
777 accounting_stats_.AllocateBytes(size); 897 accounting_stats_.AllocateBytes(size);
778 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size)); 898 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
779 } else { 899 } else {
780 DecreaseUnsweptFreeBytes(page); 900 DecreaseUnsweptFreeBytes(page);
781 } 901 }
782 902
783 if (Page::FromAllocationTop(allocation_info_.top) == page) { 903 if (Page::FromAllocationTop(allocation_info_.top) == page) {
784 allocation_info_.top = allocation_info_.limit = NULL; 904 allocation_info_.top = allocation_info_.limit = NULL;
785 } 905 }
786 906
907 intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart();
908
787 page->Unlink(); 909 page->Unlink();
788 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { 910 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
789 heap()->isolate()->memory_allocator()->Free(page); 911 heap()->isolate()->memory_allocator()->Free(page);
790 } else { 912 } else {
791 heap()->QueueMemoryChunkForFree(page); 913 heap()->QueueMemoryChunkForFree(page);
792 } 914 }
793 915
794 ASSERT(Capacity() > 0); 916 ASSERT(Capacity() > 0);
795 ASSERT(Capacity() % Page::kObjectAreaSize == 0); 917 accounting_stats_.ShrinkSpace(static_cast<int>(size));
796 accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
797 } 918 }
798 919
799 920
800 void PagedSpace::ReleaseAllUnusedPages() { 921 void PagedSpace::ReleaseAllUnusedPages() {
801 PageIterator it(this); 922 PageIterator it(this);
802 while (it.has_next()) { 923 while (it.has_next()) {
803 Page* page = it.next(); 924 Page* page = it.next();
804 if (!page->WasSwept()) { 925 if (!page->WasSwept()) {
805 if (page->LiveBytes() == 0) ReleasePage(page); 926 if (page->LiveBytes() == 0) ReleasePage(page);
806 } else { 927 } else {
(...skipping 857 matching lines...)
1664 // Free lists for old object spaces implementation 1785 // Free lists for old object spaces implementation
1665 1786
1666 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { 1787 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1667 ASSERT(size_in_bytes > 0); 1788 ASSERT(size_in_bytes > 0);
1668 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1789 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1669 1790
1670 // We write a map and possibly size information to the block. If the block 1791 // We write a map and possibly size information to the block. If the block
1671 // is big enough to be a FreeSpace with at least one extra word (the next 1792 // is big enough to be a FreeSpace with at least one extra word (the next
1672 // pointer), we set its map to be the free space map and its size to an 1793 // pointer), we set its map to be the free space map and its size to an
1673 // appropriate array length for the desired size from HeapObject::Size(). 1794 // appropriate array length for the desired size from HeapObject::Size().
1674 // If the block is too small (eg, one or two words), to hold both a size 1795 // If the block is too small (e.g. one or two words) to hold both a size
1675 // field and a next pointer, we give it a filler map that gives it the 1796 // field and a next pointer, we give it a filler map that gives it the
1676 // correct size. 1797 // correct size.
1677 if (size_in_bytes > FreeSpace::kHeaderSize) { 1798 if (size_in_bytes > FreeSpace::kHeaderSize) {
1678 set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); 1799 set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
1679 // Can't use FreeSpace::cast because it fails during deserialization. 1800 // Can't use FreeSpace::cast because it fails during deserialization.
1680 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); 1801 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1681 this_as_free_space->set_size(size_in_bytes); 1802 this_as_free_space->set_size(size_in_bytes);
1682 } else if (size_in_bytes == kPointerSize) { 1803 } else if (size_in_bytes == kPointerSize) {
1683 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); 1804 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
1684 } else if (size_in_bytes == 2 * kPointerSize) { 1805 } else if (size_in_bytes == 2 * kPointerSize) {
(...skipping 83 matching lines...)
1768 } else { 1889 } else {
1769 node->set_next(huge_list_); 1890 node->set_next(huge_list_);
1770 huge_list_ = node; 1891 huge_list_ = node;
1771 } 1892 }
1772 available_ += size_in_bytes; 1893 available_ += size_in_bytes;
1773 ASSERT(IsVeryLong() || available_ == SumFreeLists()); 1894 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1774 return 0; 1895 return 0;
1775 } 1896 }
1776 1897
1777 1898
1778 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { 1899 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list,
1900 int* node_size,
1901 int minimum_size) {
1779 FreeListNode* node = *list; 1902 FreeListNode* node = *list;
1780 1903
1781 if (node == NULL) return NULL; 1904 if (node == NULL) return NULL;
1782 1905
1906 ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map());
1907
1783 while (node != NULL && 1908 while (node != NULL &&
1784 Page::FromAddress(node->address())->IsEvacuationCandidate()) { 1909 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1785 available_ -= node->Size(); 1910 available_ -= node->Size();
1786 node = node->next(); 1911 node = node->next();
1787 } 1912 }
1788 1913
1789 if (node != NULL) { 1914 if (node == NULL) {
1790 *node_size = node->Size();
1791 *list = node->next();
1792 } else {
1793 *list = NULL; 1915 *list = NULL;
1916 return NULL;
1794 } 1917 }
1795 1918
1919 // Gets the size without checking the map. When we are booting we have
1920 // a FreeListNode before we have created its map.
1921 intptr_t size = reinterpret_cast<FreeSpace*>(node)->Size();
1922
1923 // We don't search the list for one that fits, preferring to look in the
1924 // list of larger nodes, but we do check the first in the list, because
1925 // if we had to expand the space or page we may have placed an entry that
1926 // was just long enough at the head of one of the lists.
1927 if (size < minimum_size) {
1928 *list = node;
Erik Corry 2012/01/31 10:44:44 This assignment was added.
1929 return NULL;
1930 }
1931
1932 *node_size = static_cast<int>(size);
1933 available_ -= static_cast<int>(size);
1934 *list = node->next();
1935
1796 return node; 1936 return node;
1797 } 1937 }
1798 1938
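The minimum_size parameter added to PickNodeFromList only vets the head of a size-class list: a too-small head is put back and the caller falls through to a larger list. A toy model of that behavior (the Node type and names are ours, not V8's):

```cpp
#include <cstddef>
#include <cstdio>

struct Node { size_t size; Node* next; };

Node* PickFromList(Node** list, size_t* node_size, size_t minimum_size) {
  Node* node = *list;
  if (node == NULL) return NULL;
  if (node->size < minimum_size) return NULL;  // leave it; try a bigger list
  *node_size = node->size;
  *list = node->next;                          // unlink the head
  return node;
}

int main() {
  Node small = { 16, NULL };
  Node* list = &small;
  size_t got = 0;
  printf("want 24: %s\n", PickFromList(&list, &got, 24) ? "hit" : "miss");
  printf("want 16: %s\n", PickFromList(&list, &got, 16) ? "hit" : "miss");
  return 0;
}
```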
1799 1939
1800 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { 1940 FreeListNode* FreeList::FindAbuttingNode(
1941 int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head) {
1942 FreeListNode* first_node = *list_head;
1943 if (first_node != NULL &&
1944 first_node->address() == limit &&
1945 reinterpret_cast<FreeSpace*>(first_node)->Size() >= size_in_bytes &&
1946 !Page::FromAddress(first_node->address())->IsEvacuationCandidate()) {
1947 FreeListNode* answer = first_node;
1948 int size = reinterpret_cast<FreeSpace*>(first_node)->Size();
1949 available_ -= size;
1950 *node_size = size;
1951 *list_head = first_node->next();
1952 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1953 return answer;
1954 }
1955 return NULL;
1956 }
1957
1958
1959 FreeListNode* FreeList::FindNodeFor(int size_in_bytes,
1960 int* node_size,
1961 Address limit) {
1801 FreeListNode* node = NULL; 1962 FreeListNode* node = NULL;
1802 1963
1803 if (size_in_bytes <= kSmallAllocationMax) { 1964 if (limit != NULL) {
1804 node = PickNodeFromList(&small_list_, node_size); 1965 // We may have a memory area at the head of the free list, which abuts the
1966 // old linear allocation area. This happens if the linear allocation area
1967 // has been shortened to allow an incremental marking step to be performed.
1968 // In that case we prefer to return the free memory area that is contiguous
1969 // with the old linear allocation area.
1970 node = FindAbuttingNode(size_in_bytes, node_size, limit, &large_list_);
1971 if (node != NULL) return node;
1972 node = FindAbuttingNode(size_in_bytes, node_size, limit, &huge_list_);
1805 if (node != NULL) return node; 1973 if (node != NULL) return node;
1806 } 1974 }
1807 1975
1808 if (size_in_bytes <= kMediumAllocationMax) { 1976 node = PickNodeFromList(&small_list_, node_size, size_in_bytes);
1809 node = PickNodeFromList(&medium_list_, node_size); 1977 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1978 if (node != NULL) return node;
1979
1980 node = PickNodeFromList(&medium_list_, node_size, size_in_bytes);
1981 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1982 if (node != NULL) return node;
1983
1984 node = PickNodeFromList(&large_list_, node_size, size_in_bytes);
1985 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1986 if (node != NULL) return node;
1987
1988 // The tricky third clause in this for statement is due to the fact that
1989 // PickNodeFromList can cut nodes out of the list if they are unavailable for
1990 // new allocation (e.g. if they are on a page that has been scheduled for
1991 // evacuation).
1992 for (FreeListNode** cur = &huge_list_;
1993 *cur != NULL;
1994 cur = (*cur) == NULL ? cur : (*cur)->next_address()) {
1995 node = PickNodeFromList(cur, node_size, size_in_bytes);
1996 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1810 if (node != NULL) return node; 1997 if (node != NULL) return node;
1811 } 1998 }
1812 1999
1813 if (size_in_bytes <= kLargeAllocationMax) {
1814 node = PickNodeFromList(&large_list_, node_size);
1815 if (node != NULL) return node;
1816 }
1817
1818 for (FreeListNode** cur = &huge_list_;
1819 *cur != NULL;
1820 cur = (*cur)->next_address()) {
1821 FreeListNode* cur_node = *cur;
1822 while (cur_node != NULL &&
1823 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1824 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1825 cur_node = cur_node->next();
1826 }
1827
1828 *cur = cur_node;
1829 if (cur_node == NULL) break;
1830
1831 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1832 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1833 int size = cur_as_free_space->Size();
1834 if (size >= size_in_bytes) {
1835 // Large enough node found. Unlink it from the list.
1836 node = *cur;
1837 *node_size = size;
1838 *cur = node->next();
1839 break;
1840 }
1841 }
1842
1843 return node; 2000 return node;
1844 } 2001 }
1845 2002
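FindAbuttingNode's fast path checks whether the first free node starts exactly at the old linear-allocation limit, in which case the linear area can simply be extended in place. A toy model under the same assumption (all names ours):

```cpp
#include <cstddef>
#include <cstdio>

struct Node { char* address; size_t size; Node* next; };

Node* FindAbutting(Node** head, size_t wanted, char* limit) {
  Node* n = *head;
  if (n != NULL && n->address == limit && n->size >= wanted) {
    *head = n->next;  // unlink; the caller extends its linear area with it
    return n;
  }
  return NULL;        // fall back to the ordinary size-class search
}

int main() {
  static char arena[256];
  Node tail = { arena + 128, 128, NULL };
  Node head_node = { arena + 64, 64, &tail };
  Node* head = &head_node;
  char* limit = arena + 64;  // the old linear area ended here
  printf("abutting: %s\n", FindAbutting(&head, 48, limit) ? "yes" : "no");
  return 0;
}
```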
1846 2003
1847 // Allocation on the old space free list. If it succeeds then a new linear 2004 // Allocation on the old space free list. If it succeeds then a new linear
1848 // allocation space has been set up with the top and limit of the space. If 2005 // allocation space has been set up with the top and limit of the space. If
1849 // the allocation fails then NULL is returned, and the caller can perform a GC 2006 // the allocation fails then NULL is returned, and the caller can perform a GC
1850 // or allocate a new page before retrying. 2007 // or allocate a new page before retrying.
1851 HeapObject* FreeList::Allocate(int size_in_bytes) { 2008 HeapObject* FreeList::Allocate(int size_in_bytes) {
1852 ASSERT(0 < size_in_bytes); 2009 ASSERT(0 < size_in_bytes);
1853 ASSERT(size_in_bytes <= kMaxBlockSize); 2010 ASSERT(size_in_bytes <= kMaxBlockSize);
1854 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 2011 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1855 // Don't free list allocate if there is linear space available. 2012 // Don't free list allocate if there is linear space available.
1856 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); 2013 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
1857 2014
1858 int new_node_size = 0; 2015 int new_node_size = 0;
1859 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); 2016 FreeListNode* new_node =
2017 FindNodeFor(size_in_bytes, &new_node_size, owner_->limit());
1860 if (new_node == NULL) return NULL; 2018 if (new_node == NULL) return NULL;
1861 2019
1862 available_ -= new_node_size; 2020 if (new_node->address() == owner_->limit()) {
2021 // The new freelist node we were given is an extension of the one we had
2022 // last. This is a common thing to happen when we extend a small page by
2023 // committing more memory. In this case we just add the new node to the
2024 // linear allocation area and recurse.
2025 owner_->Allocate(new_node_size);
2026 owner_->SetTop(owner_->top(), new_node->address() + new_node_size);
2027 MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes);
2028 Object* answer;
2029 if (!allocation->ToObject(&answer)) return NULL;
2030 return HeapObject::cast(answer);
2031 }
2032
1863 ASSERT(IsVeryLong() || available_ == SumFreeLists()); 2033 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1864 2034
1865 int bytes_left = new_node_size - size_in_bytes; 2035 int bytes_left = new_node_size - size_in_bytes;
1866 ASSERT(bytes_left >= 0); 2036 ASSERT(bytes_left >= 0);
1867 2037
1868 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); 2038 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
1869 // Mark the old linear allocation area with a free space map so it can be 2039 // Mark the old linear allocation area with a free space map so it can be
1870 // skipped when scanning the heap. This also puts it back in the free list 2040 // skipped when scanning the heap. This also puts it back in the free list
1871 // if it is big enough. 2041 // if it is big enough.
1872 owner_->Free(owner_->top(), old_linear_size); 2042 if (old_linear_size != 0) {
2043 owner_->AddToFreeLists(owner_->top(), old_linear_size);
2044 }
1873 2045
1874 #ifdef DEBUG 2046 #ifdef DEBUG
1875 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { 2047 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
1876 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0); 2048 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
1877 } 2049 }
1878 #endif 2050 #endif
1879 2051
1880 owner_->heap()->incremental_marking()->OldSpaceStep( 2052 owner_->heap()->incremental_marking()->OldSpaceStep(
1881 size_in_bytes - old_linear_size); 2053 size_in_bytes - old_linear_size);
1882 2054
1883 // The old-space-step might have finished sweeping and restarted marking. 2055 // The old-space-step might have finished sweeping and restarted marking.
1884 // Verify that it did not turn the page of the new node into an evacuation 2056 // Verify that it did not turn the page of the new node into an evacuation
1885 // candidate. 2057 // candidate.
1886 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); 2058 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
1887 2059
1888 const int kThreshold = IncrementalMarking::kAllocatedThreshold; 2060 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
1889 2061
1890 // Memory in the linear allocation area is counted as allocated. We may free 2062 // Memory in the linear allocation area is counted as allocated. We may free
1891 // a little of this again immediately - see below. 2063 // a little of this again immediately - see below.
1892 owner_->Allocate(new_node_size); 2064 owner_->Allocate(new_node_size);
1893 2065
1894 if (bytes_left > kThreshold && 2066 if (bytes_left > kThreshold &&
1895 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && 2067 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
1896 FLAG_incremental_marking_steps) { 2068 FLAG_incremental_marking_steps) {
1897 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); 2069 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
1898 // We don't want to give too large linear areas to the allocator while 2070 // We don't want to give too large linear areas to the allocator while
1899 // incremental marking is going on, because we won't check again whether 2071 // incremental marking is going on, because we won't check again whether
1900 // we want to do another increment until the linear area is used up. 2072 // we want to do another increment until the linear area is used up.
1901 owner_->Free(new_node->address() + size_in_bytes + linear_size, 2073 owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size,
1902 new_node_size - size_in_bytes - linear_size); 2074 new_node_size - size_in_bytes - linear_size);
1903 owner_->SetTop(new_node->address() + size_in_bytes, 2075 owner_->SetTop(new_node->address() + size_in_bytes,
1904 new_node->address() + size_in_bytes + linear_size); 2076 new_node->address() + size_in_bytes + linear_size);
1905 } else if (bytes_left > 0) { 2077 } else if (bytes_left > 0) {
1906 // Normally we give the rest of the node to the allocator as its new 2078 // Normally we give the rest of the node to the allocator as its new
1907 // linear allocation area. 2079 // linear allocation area.
1908 owner_->SetTop(new_node->address() + size_in_bytes, 2080 owner_->SetTop(new_node->address() + size_in_bytes,
1909 new_node->address() + new_node_size); 2081 new_node->address() + new_node_size);
1910 } else { 2082 } else {
2083 ASSERT(bytes_left == 0);
1911 // TODO(gc) Try not freeing linear allocation region when bytes_left 2084 // TODO(gc) Try not freeing linear allocation region when bytes_left
1912 // are zero. 2085 // are zero.
1913 owner_->SetTop(NULL, NULL); 2086 owner_->SetTop(NULL, NULL);
1914 } 2087 }
1915 2088
1916 return new_node; 2089 return new_node;
1917 } 2090 }
1918 2091
1919 2092
1920 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { 2093 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
(...skipping 112 matching lines...)
2033 // or because we have lowered the limit in order to get periodic incremental 2206 // or because we have lowered the limit in order to get periodic incremental
2034 // marking. The most reliable way to ensure that there is linear space is 2207 // marking. The most reliable way to ensure that there is linear space is
2035 // to do the allocation, then rewind the limit. 2208 // to do the allocation, then rewind the limit.
2036 ASSERT(bytes <= InitialCapacity()); 2209 ASSERT(bytes <= InitialCapacity());
2037 MaybeObject* maybe = AllocateRaw(bytes); 2210 MaybeObject* maybe = AllocateRaw(bytes);
2038 Object* object = NULL; 2211 Object* object = NULL;
2039 if (!maybe->ToObject(&object)) return false; 2212 if (!maybe->ToObject(&object)) return false;
2040 HeapObject* allocation = HeapObject::cast(object); 2213 HeapObject* allocation = HeapObject::cast(object);
2041 Address top = allocation_info_.top; 2214 Address top = allocation_info_.top;
2042 if ((top - bytes) == allocation->address()) { 2215 if ((top - bytes) == allocation->address()) {
2043 allocation_info_.top = allocation->address(); 2216 Address new_top = allocation->address();
2217 ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart());
2218 allocation_info_.top = new_top;
2044 return true; 2219 return true;
2045 } 2220 }
2046 // There may be a borderline case here where the allocation succeeded, but 2221 // There may be a borderline case here where the allocation succeeded, but
2047 // the limit and top have moved on to a new page. In that case we try again. 2222 // the limit and top have moved on to a new page. In that case we try again.
2048 return ReserveSpace(bytes); 2223 return ReserveSpace(bytes);
2049 } 2224 }
2050 2225
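ReserveSpace's allocate-then-rewind trick, described in the comment above, can be modeled with a simple bump allocator: perform the allocation to guarantee the bytes are linear, then move top back so they remain available. A hedged sketch (names ours, not V8's):

```cpp
#include <cstdio>

struct LinearArea { char* top; char* limit; };

char* AllocateRaw(LinearArea* a, int bytes) {
  if (a->limit - a->top < bytes) return NULL;  // would need a new page
  char* result = a->top;
  a->top += bytes;                             // bump-pointer allocation
  return result;
}

bool ReserveSpace(LinearArea* a, int bytes) {
  char* obj = AllocateRaw(a, bytes);
  if (obj == NULL) return false;
  if (a->top - bytes == obj) {
    a->top = obj;  // rewind: the bytes are now guaranteed linear space
    return true;
  }
  return false;    // top moved to a new page; a real caller retries
}

int main() {
  static char page[1024];
  LinearArea a = { page, page + sizeof(page) };
  bool ok = ReserveSpace(&a, 128);
  printf("reserved: %s, top unchanged: %s\n",
         ok ? "yes" : "no", a.top == page ? "yes" : "no");
  return 0;
}
```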
2051 2226
2052 void PagedSpace::PrepareForMarkCompact() { 2227 void PagedSpace::PrepareForMarkCompact() {
2053 // We don't have a linear allocation area while sweeping. It will be restored 2228 // We don't have a linear allocation area while sweeping. It will be restored
2054 // on the first allocation after the sweep. 2229 // on the first allocation after the sweep.
2055 // Mark the old linear allocation area with a free space map so it can be 2230 // Mark the old linear allocation area with a free space map so it can be
2056 // skipped when scanning the heap. 2231 // skipped when scanning the heap.
2057 int old_linear_size = static_cast<int>(limit() - top()); 2232 int old_linear_size = static_cast<int>(limit() - top());
2058 Free(top(), old_linear_size); 2233 AddToFreeLists(top(), old_linear_size);
2059 SetTop(NULL, NULL); 2234 SetTop(NULL, NULL);
2060 2235
2061 // Stop lazy sweeping and clear marking bits for unswept pages. 2236 // Stop lazy sweeping and clear marking bits for unswept pages.
2062 if (first_unswept_page_ != NULL) { 2237 if (first_unswept_page_ != NULL) {
2063 Page* p = first_unswept_page_; 2238 Page* p = first_unswept_page_;
2064 do { 2239 do {
2065 // Do not use ShouldBeSweptLazily predicate here. 2240 // Do not use ShouldBeSweptLazily predicate here.
2066 // New evacuation candidates were selected but they still have 2241 // New evacuation candidates were selected but they still have
2067 // to be swept before collection starts. 2242 // to be swept before collection starts.
2068 if (!p->WasSwept()) { 2243 if (!p->WasSwept()) {
(...skipping 22 matching lines...) Expand all
2091 if (new_top <= allocation_info_.limit) return true; 2266 if (new_top <= allocation_info_.limit) return true;
2092 2267
2093 HeapObject* new_area = free_list_.Allocate(size_in_bytes); 2268 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2094 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); 2269 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2095 if (new_area == NULL) return false; 2270 if (new_area == NULL) return false;
2096 2271
2097 int old_linear_size = static_cast<int>(limit() - top()); 2272 int old_linear_size = static_cast<int>(limit() - top());
2098 // Mark the old linear allocation area with a free space so it can be 2273 // Mark the old linear allocation area with a free space so it can be
2099 // skipped when scanning the heap. This also puts it back in the free list 2274 // skipped when scanning the heap. This also puts it back in the free list
2100 // if it is big enough. 2275 // if it is big enough.
2101 Free(top(), old_linear_size); 2276 AddToFreeLists(top(), old_linear_size);
2102 2277
2103 SetTop(new_area->address(), new_area->address() + size_in_bytes); 2278 SetTop(new_area->address(), new_area->address() + size_in_bytes);
2104 Allocate(size_in_bytes); 2279 // The AddToFreeLists call above will reduce the size of the space in the
2280 // allocation stats. We don't need to add this linear area to the size
2281 // with an Allocate(size_in_bytes) call here, because the
2282 // free_list_.Allocate() call above already accounted for this memory.
2105 return true; 2283 return true;
2106 } 2284 }
2107 2285
2108 2286
2109 // You have to call this last, since the implementation from PagedSpace 2287 // You have to call this last, since the implementation from PagedSpace
2110 // doesn't know that memory was 'promised' to large object space. 2288 // doesn't know that memory was 'promised' to large object space.
2111 bool LargeObjectSpace::ReserveSpace(int bytes) { 2289 bool LargeObjectSpace::ReserveSpace(int bytes) {
2112 return heap()->OldGenerationSpaceAvailable() >= bytes; 2290 return heap()->OldGenerationSpaceAvailable() >= bytes;
2113 } 2291 }
2114 2292
(...skipping 60 matching lines...)
2175 2353
2176 // Free list allocation failed and there is no next page. Fail if we have 2354 // Free list allocation failed and there is no next page. Fail if we have
2177 // hit the old generation size limit that should cause a garbage 2355 // hit the old generation size limit that should cause a garbage
2178 // collection. 2356 // collection.
2179 if (!heap()->always_allocate() && 2357 if (!heap()->always_allocate() &&
2180 heap()->OldGenerationAllocationLimitReached()) { 2358 heap()->OldGenerationAllocationLimitReached()) {
2181 return NULL; 2359 return NULL;
2182 } 2360 }
2183 2361
2184 // Try to expand the space and allocate in the new next page. 2362 // Try to expand the space and allocate in the new next page.
2185 if (Expand()) { 2363 if (Expand(size_in_bytes)) {
2186 return free_list_.Allocate(size_in_bytes); 2364 return free_list_.Allocate(size_in_bytes);
2187 } 2365 }
2188 2366
2189 // Last ditch, sweep all the remaining pages to try to find space. This may 2367 // Last ditch, sweep all the remaining pages to try to find space. This may
2190 // cause a pause. 2368 // cause a pause.
2191 if (!IsSweepingComplete()) { 2369 if (!IsSweepingComplete()) {
2192 AdvanceSweeper(kMaxInt); 2370 AdvanceSweeper(kMaxInt);
2193 2371
2194 // Retry the free list allocation. 2372 // Retry the free list allocation.
2195 HeapObject* object = free_list_.Allocate(size_in_bytes); 2373 HeapObject* object = free_list_.Allocate(size_in_bytes);
(...skipping 340 matching lines...)
2536 if (previous == NULL) { 2714 if (previous == NULL) {
2537 first_page_ = current; 2715 first_page_ = current;
2538 } else { 2716 } else {
2539 previous->set_next_page(current); 2717 previous->set_next_page(current);
2540 } 2718 }
2541 2719
2542 // Free the chunk. 2720 // Free the chunk.
2543 heap()->mark_compact_collector()->ReportDeleteIfNeeded( 2721 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2544 object, heap()->isolate()); 2722 object, heap()->isolate());
2545 size_ -= static_cast<int>(page->size()); 2723 size_ -= static_cast<int>(page->size());
2724 ASSERT(size_ >= 0);
2546 objects_size_ -= object->Size(); 2725 objects_size_ -= object->Size();
2547 page_count_--; 2726 page_count_--;
2548 2727
2549 if (is_pointer_object) { 2728 if (is_pointer_object) {
2550 heap()->QueueMemoryChunkForFree(page); 2729 heap()->QueueMemoryChunkForFree(page);
2551 } else { 2730 } else {
2552 heap()->isolate()->memory_allocator()->Free(page); 2731 heap()->isolate()->memory_allocator()->Free(page);
2553 } 2732 }
2554 } 2733 }
2555 } 2734 }
(...skipping 118 matching lines...)
2674 object->ShortPrint(); 2853 object->ShortPrint();
2675 PrintF("\n"); 2854 PrintF("\n");
2676 } 2855 }
2677 printf(" --------------------------------------\n"); 2856 printf(" --------------------------------------\n");
2678 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 2857 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
2679 } 2858 }
2680 2859
2681 #endif // DEBUG 2860 #endif // DEBUG
2682 2861
2683 } } // namespace v8::internal 2862 } } // namespace v8::internal