| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 13 matching lines...) |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #include "liveobjectlist-inl.h" | 30 #include "liveobjectlist-inl.h" |
| 31 #include "macro-assembler.h" | 31 #include "macro-assembler.h" |
| 32 #include "mark-compact.h" | 32 #include "mark-compact.h" |
| 33 #include "platform.h" | 33 #include "platform.h" |
| 34 #include "snapshot.h" | |
| 35 | 34 |
| 36 namespace v8 { | 35 namespace v8 { |
| 37 namespace internal { | 36 namespace internal { |
| 38 | 37 |
| 39 | 38 |
| 40 // ---------------------------------------------------------------------------- | 39 // ---------------------------------------------------------------------------- |
| 41 // HeapObjectIterator | 40 // HeapObjectIterator |
| 42 | 41 |
| 43 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { | 42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { |
| 44 // You can't actually iterate over the anchor page. It is not a real page, | 43 // You can't actually iterate over the anchor page. It is not a real page, |
| (...skipping 212 matching lines...) |
| 257 | 256 |
| 258 | 257 |
| 259 // ----------------------------------------------------------------------------- | 258 // ----------------------------------------------------------------------------- |
| 260 // MemoryAllocator | 259 // MemoryAllocator |
| 261 // | 260 // |
| 262 | 261 |
| 263 MemoryAllocator::MemoryAllocator(Isolate* isolate) | 262 MemoryAllocator::MemoryAllocator(Isolate* isolate) |
| 264 : isolate_(isolate), | 263 : isolate_(isolate), |
| 265 capacity_(0), | 264 capacity_(0), |
| 266 capacity_executable_(0), | 265 capacity_executable_(0), |
| 267 memory_allocator_reserved_(0), | 266 size_(0), |
| 268 size_executable_(0) { | 267 size_executable_(0) { |
| 269 } | 268 } |
| 270 | 269 |
| 271 | 270 |
| 272 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { | 271 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { |
| 273 capacity_ = RoundUp(capacity, Page::kPageSize); | 272 capacity_ = RoundUp(capacity, Page::kPageSize); |
| 274 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); | 273 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); |
| 275 ASSERT_GE(capacity_, capacity_executable_); | 274 ASSERT_GE(capacity_, capacity_executable_); |
| 276 | 275 |
| 277 memory_allocator_reserved_ = 0; | 276 size_ = 0; |
| 278 size_executable_ = 0; | 277 size_executable_ = 0; |
| 279 | 278 |
| 280 return true; | 279 return true; |
| 281 } | 280 } |
| 282 | 281 |
| 283 | 282 |
| 284 void MemoryAllocator::TearDown() { | 283 void MemoryAllocator::TearDown() { |
| 285 // Check that spaces were torn down before MemoryAllocator. | 284 // Check that spaces were torn down before MemoryAllocator. |
| 286 CHECK_EQ(static_cast<int64_t>(memory_allocator_reserved_), | 285 ASSERT(size_ == 0); |
| 287 static_cast<int64_t>(0)); | |
| 288 // TODO(gc) this will be true again when we fix FreeMemory. | 286 // TODO(gc) this will be true again when we fix FreeMemory. |
| 289 // ASSERT(size_executable_ == 0); | 287 // ASSERT(size_executable_ == 0); |
| 290 capacity_ = 0; | 288 capacity_ = 0; |
| 291 capacity_executable_ = 0; | 289 capacity_executable_ = 0; |
| 292 } | 290 } |
| 293 | 291 |
| 294 | 292 |
| 295 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, | 293 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, |
| 296 Executability executable) { | 294 Executability executable) { |
| 297 // TODO(gc) make code_range part of memory allocator? | 295 // TODO(gc) make code_range part of memory allocator? |
| 298 ASSERT(reservation->IsReserved()); | 296 ASSERT(reservation->IsReserved()); |
| 299 size_t size = reservation->size(); | 297 size_t size = reservation->size(); |
| 300 ASSERT(memory_allocator_reserved_ >= size); | 298 ASSERT(size_ >= size); |
| 301 memory_allocator_reserved_ -= size; | 299 size_ -= size; |
| 302 | 300 |
| 303 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 301 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
| 304 | 302 |
| 305 if (executable == EXECUTABLE) { | 303 if (executable == EXECUTABLE) { |
| 306 ASSERT(size_executable_ >= size); | 304 ASSERT(size_executable_ >= size); |
| 307 size_executable_ -= size; | 305 size_executable_ -= size; |
| 308 } | 306 } |
| 309 // Code which is part of the code-range does not have its own VirtualMemory. | 307 // Code which is part of the code-range does not have its own VirtualMemory. |
| 310 ASSERT(!isolate_->code_range()->contains( | 308 ASSERT(!isolate_->code_range()->contains( |
| 311 static_cast<Address>(reservation->address()))); | 309 static_cast<Address>(reservation->address()))); |
| 312 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); | 310 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); |
| 313 reservation->Release(); | 311 reservation->Release(); |
| 314 } | 312 } |
| 315 | 313 |
| 316 | 314 |
| 317 void MemoryAllocator::FreeMemory(Address base, | 315 void MemoryAllocator::FreeMemory(Address base, |
| 318 size_t size, | 316 size_t size, |
| 319 Executability executable) { | 317 Executability executable) { |
| 320 // TODO(gc) make code_range part of memory allocator? | 318 // TODO(gc) make code_range part of memory allocator? |
| 321 ASSERT(memory_allocator_reserved_ >= size); | 319 ASSERT(size_ >= size); |
| 322 memory_allocator_reserved_ -= size; | 320 size_ -= size; |
| 323 | 321 |
| 324 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 322 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
| 325 | 323 |
| 326 if (executable == EXECUTABLE) { | 324 if (executable == EXECUTABLE) { |
| 327 ASSERT(size_executable_ >= size); | 325 ASSERT(size_executable_ >= size); |
| 328 size_executable_ -= size; | 326 size_executable_ -= size; |
| 329 } | 327 } |
| 330 if (isolate_->code_range()->contains(static_cast<Address>(base))) { | 328 if (isolate_->code_range()->contains(static_cast<Address>(base))) { |
| 331 ASSERT(executable == EXECUTABLE); | 329 ASSERT(executable == EXECUTABLE); |
| 332 isolate_->code_range()->FreeRawMemory(base, size); | 330 isolate_->code_range()->FreeRawMemory(base, size); |
| 333 } else { | 331 } else { |
| 334 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); | 332 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); |
| 335 bool result = VirtualMemory::ReleaseRegion(base, size); | 333 bool result = VirtualMemory::ReleaseRegion(base, size); |
| 336 USE(result); | 334 USE(result); |
| 337 ASSERT(result); | 335 ASSERT(result); |
| 338 } | 336 } |
| 339 } | 337 } |
| 340 | 338 |
| 341 | 339 |
| 342 Address MemoryAllocator::ReserveAlignedMemory(size_t size, | 340 Address MemoryAllocator::ReserveAlignedMemory(size_t size, |
| 343 size_t alignment, | 341 size_t alignment, |
| 344 VirtualMemory* controller) { | 342 VirtualMemory* controller) { |
| 345 VirtualMemory reservation(size, alignment); | 343 VirtualMemory reservation(size, alignment); |
| 346 | 344 |
| 347 if (!reservation.IsReserved()) return NULL; | 345 if (!reservation.IsReserved()) return NULL; |
| 348 memory_allocator_reserved_ += reservation.size(); | 346 size_ += reservation.size(); |
| 349 Address base = RoundUp(static_cast<Address>(reservation.address()), | 347 Address base = RoundUp(static_cast<Address>(reservation.address()), |
| 350 alignment); | 348 alignment); |
| 351 controller->TakeControl(&reservation); | 349 controller->TakeControl(&reservation); |
| 352 return base; | 350 return base; |
| 353 } | 351 } |
| 354 | 352 |
| 355 | 353 |
| 356 Address MemoryAllocator::AllocateAlignedMemory(size_t size, | 354 Address MemoryAllocator::AllocateAlignedMemory(size_t size, |
| 357 size_t reserved_size, | |
| 358 size_t alignment, | 355 size_t alignment, |
| 359 Executability executable, | 356 Executability executable, |
| 360 VirtualMemory* controller) { | 357 VirtualMemory* controller) { |
| 361 ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >= | |
| 362 RoundUp(size, OS::CommitPageSize())); | |
| 363 VirtualMemory reservation; | 358 VirtualMemory reservation; |
| 364 Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation); | 359 Address base = ReserveAlignedMemory(size, alignment, &reservation); |
| 365 if (base == NULL) return NULL; | 360 if (base == NULL) return NULL; |
| 366 if (!reservation.Commit(base, | 361 if (!reservation.Commit(base, |
| 367 size, | 362 size, |
| 368 executable == EXECUTABLE)) { | 363 executable == EXECUTABLE)) { |
| 369 return NULL; | 364 return NULL; |
| 370 } | 365 } |
| 371 controller->TakeControl(&reservation); | 366 controller->TakeControl(&reservation); |
| 372 return base; | 367 return base; |
| 373 } | 368 } |
| 374 | 369 |
| 375 | 370 |
| 376 void Page::InitializeAsAnchor(PagedSpace* owner) { | 371 void Page::InitializeAsAnchor(PagedSpace* owner) { |
| 377 set_owner(owner); | 372 set_owner(owner); |
| 378 set_prev_page(this); | 373 set_prev_page(this); |
| 379 set_next_page(this); | 374 set_next_page(this); |
| 380 } | 375 } |
| 381 | 376 |
| 382 | 377 |
| 383 void Page::CommitMore(intptr_t space_needed) { | |
| 384 intptr_t reserved_page_size = reservation_.IsReserved() ? | |
| 385 reservation_.size() : | |
| 386 Page::kPageSize; | |
| 387 ASSERT(size() + space_needed <= reserved_page_size); | |
| 388 // At increase the page size by at least 64k (this also rounds to OS page | |
| 389 // size). | |
| 390 intptr_t expand = | |
| 391 Min(reserved_page_size - size(), | |
| 392 RoundUp(size() + space_needed, Page::kGrowthUnit) - size()); | |
| 393 ASSERT(expand <= kPageSize - size()); | |
| 394 ASSERT(expand <= reserved_page_size - size()); | |
| 395 Executability executable = | |
| 396 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | |
| 397 Address old_end = ObjectAreaEnd(); | |
| 398 if (!VirtualMemory::CommitRegion(old_end, expand, executable)) return; | |
| 399 | |
| 400 set_size(size() + expand); | |
| 401 | |
| 402 PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner()); | |
| 403 paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping( | |
| 404 paged_space, | |
| 405 old_end, | |
| 406 0, // No new memory was reserved. | |
| 407 expand, // New memory committed. | |
| 408 executable); | |
| 409 paged_space->IncreaseCapacity(expand); | |
| 410 | |
| 411 // In spaces with alignment requirements (e.g. map space) we have to align | |
| 412 // the expanded area with the correct object alignment. | |
| 413 Address new_area = RoundUpToObjectAlignment(old_end); | |
| 414 | |
| 415 // In spaces with alignment requirements, this will waste the space for one | |
| 416 // object per doubling of the page size until the next GC. | |
| 417 paged_space->AddToFreeLists(old_end, new_area - old_end); | |
| 418 | |
| 419 expand -= (new_area - old_end); | |
| 420 | |
| 421 paged_space->AddToFreeLists(new_area, expand); | |
| 422 } | |
| 423 | |
| 424 | |
| 425 Address Page::RoundUpToObjectAlignment(Address a) { | |
| 426 PagedSpace* paged_owner = reinterpret_cast<PagedSpace*>(owner()); | |
| 427 intptr_t off = a - ObjectAreaStart(); | |
| 428 intptr_t modulus = off % paged_owner->ObjectAlignment(); | |
| 429 if (modulus == 0) return a; | |
| 430 return a - modulus + paged_owner->ObjectAlignment(); | |
| 431 } | |
| 432 | |
| 433 | |
| 434 NewSpacePage* NewSpacePage::Initialize(Heap* heap, | 378 NewSpacePage* NewSpacePage::Initialize(Heap* heap, |
| 435 Address start, | 379 Address start, |
| 436 SemiSpace* semi_space) { | 380 SemiSpace* semi_space) { |
| 437 MemoryChunk* chunk = MemoryChunk::Initialize(heap, | 381 MemoryChunk* chunk = MemoryChunk::Initialize(heap, |
| 438 start, | 382 start, |
| 439 Page::kPageSize, | 383 Page::kPageSize, |
| 440 NOT_EXECUTABLE, | 384 NOT_EXECUTABLE, |
| 441 semi_space); | 385 semi_space); |
| 442 chunk->set_next_chunk(NULL); | 386 chunk->set_next_chunk(NULL); |
| 443 chunk->set_prev_chunk(NULL); | 387 chunk->set_prev_chunk(NULL); |
| (...skipping 65 matching lines...) |
| 509 ClearFlag(SCAN_ON_SCAVENGE); | 453 ClearFlag(SCAN_ON_SCAVENGE); |
| 510 } | 454 } |
| 511 next_chunk_->prev_chunk_ = prev_chunk_; | 455 next_chunk_->prev_chunk_ = prev_chunk_; |
| 512 prev_chunk_->next_chunk_ = next_chunk_; | 456 prev_chunk_->next_chunk_ = next_chunk_; |
| 513 prev_chunk_ = NULL; | 457 prev_chunk_ = NULL; |
| 514 next_chunk_ = NULL; | 458 next_chunk_ = NULL; |
| 515 } | 459 } |
| 516 | 460 |
| 517 | 461 |
| 518 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, | 462 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, |
| 519 intptr_t committed_body_size, | |
| 520 Executability executable, | 463 Executability executable, |
| 521 Space* owner) { | 464 Space* owner) { |
| 522 ASSERT(body_size >= committed_body_size); | 465 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; |
| 523 size_t chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + body_size, | |
| 524 OS::CommitPageSize()); | |
| 525 intptr_t committed_chunk_size = | |
| 526 committed_body_size + MemoryChunk::kObjectStartOffset; | |
| 527 committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize()); | |
| 528 Heap* heap = isolate_->heap(); | 466 Heap* heap = isolate_->heap(); |
| 529 Address base = NULL; | 467 Address base = NULL; |
| 530 VirtualMemory reservation; | 468 VirtualMemory reservation; |
| 531 if (executable == EXECUTABLE) { | 469 if (executable == EXECUTABLE) { |
| 532 // Check executable memory limit. | 470 // Check executable memory limit. |
| 533 if (size_executable_ + chunk_size > capacity_executable_) { | 471 if (size_executable_ + chunk_size > capacity_executable_) { |
| 534 LOG(isolate_, | 472 LOG(isolate_, |
| 535 StringEvent("MemoryAllocator::AllocateRawMemory", | 473 StringEvent("MemoryAllocator::AllocateRawMemory", |
| 536 "V8 Executable Allocation capacity exceeded")); | 474 "V8 Executable Allocation capacity exceeded")); |
| 537 return NULL; | 475 return NULL; |
| 538 } | 476 } |
| 539 | 477 |
| 540 // Allocate executable memory either from code range or from the | 478 // Allocate executable memory either from code range or from the |
| 541 // OS. | 479 // OS. |
| 542 if (isolate_->code_range()->exists()) { | 480 if (isolate_->code_range()->exists()) { |
| 543 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); | 481 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); |
| 544 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), | 482 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), |
| 545 MemoryChunk::kAlignment)); | 483 MemoryChunk::kAlignment)); |
| 546 if (base == NULL) return NULL; | 484 if (base == NULL) return NULL; |
| 547 // The AllocateAlignedMemory method will update the memory allocator | 485 size_ += chunk_size; |
| 548 // memory used, but we are not using that if we have a code range, so | 486 // Update executable memory size. |
| 549 // we update it here. | 487 size_executable_ += chunk_size; |
| 550 memory_allocator_reserved_ += chunk_size; | |
| 551 } else { | 488 } else { |
| 552 base = AllocateAlignedMemory(committed_chunk_size, | 489 base = AllocateAlignedMemory(chunk_size, |
| 553 chunk_size, | |
| 554 MemoryChunk::kAlignment, | 490 MemoryChunk::kAlignment, |
| 555 executable, | 491 executable, |
| 556 &reservation); | 492 &reservation); |
| 557 if (base == NULL) return NULL; | 493 if (base == NULL) return NULL; |
| | 494       // Update executable memory size. |
| | 495       size_executable_ += reservation.size(); |
| 558 } | 496 } |
| 559 } else { | 497 } else { |
| 560 base = AllocateAlignedMemory(committed_chunk_size, | 498 base = AllocateAlignedMemory(chunk_size, |
| 561 chunk_size, | |
| 562 MemoryChunk::kAlignment, | 499 MemoryChunk::kAlignment, |
| 563 executable, | 500 executable, |
| 564 &reservation); | 501 &reservation); |
| 565 | 502 |
| 566 if (base == NULL) return NULL; | 503 if (base == NULL) return NULL; |
| 567 } | 504 } |
| 568 | 505 |
| 569 AllocationBookkeeping( | 506 #ifdef DEBUG |
| 570 owner, base, chunk_size, committed_chunk_size, executable); | 507 ZapBlock(base, chunk_size); |
| | 508 #endif |
| | 509   isolate_->counters()->memory_allocated()-> |
| | 510       Increment(static_cast<int>(chunk_size)); |
| | 511 |
| | 512   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); |
| | 513   if (owner != NULL) { |
| | 514     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); |
| | 515     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); |
| | 516   } |
| 571 | 517 |
| 572 MemoryChunk* result = MemoryChunk::Initialize(heap, | 518 MemoryChunk* result = MemoryChunk::Initialize(heap, |
| 573 base, | 519 base, |
| 574 committed_chunk_size, | 520 chunk_size, |
| 575 executable, | 521 executable, |
| 576 owner); | 522 owner); |
| 577 result->set_reserved_memory(&reservation); | 523 result->set_reserved_memory(&reservation); |
| 578 return result; | 524 return result; |
| 579 } | 525 } |
| 580 | 526 |
| 581 | 527 |
| 582 void MemoryAllocator::AllocationBookkeeping(Space* owner, | 528 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, |
| 583 Address base, | |
| 584 intptr_t reserved_chunk_size, | |
| 585 intptr_t committed_chunk_size, | |
| 586 Executability executable) { | |
| 587 if (executable == EXECUTABLE) { | |
| 588 // Update executable memory size. | |
| 589 size_executable_ += reserved_chunk_size; | |
| 590 } | |
| 591 | |
| 592 #ifdef DEBUG | |
| 593 ZapBlock(base, committed_chunk_size); | |
| 594 #endif | |
| 595 isolate_->counters()->memory_allocated()-> | |
| 596 Increment(static_cast<int>(committed_chunk_size)); | |
| 597 | |
| 598 LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size)); | |
| 599 if (owner != NULL) { | |
| 600 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | |
| 601 PerformAllocationCallback( | |
| 602 space, kAllocationActionAllocate, committed_chunk_size); | |
| 603 } | |
| 604 } | |
| 605 | |
| 606 | |
| 607 Page* MemoryAllocator::AllocatePage(intptr_t committed_object_area_size, | |
| 608 PagedSpace* owner, | |
| 609 Executability executable) { | 529 Executability executable) { |
| 610 ASSERT(committed_object_area_size <= Page::kObjectAreaSize); | 530 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); |
| 611 | |
| 612 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, | |
| 613 committed_object_area_size, | |
| 614 executable, | |
| 615 owner); | |
| 616 | 531 |
| 617 if (chunk == NULL) return NULL; | 532 if (chunk == NULL) return NULL; |
| 618 | 533 |
| 619 return Page::Initialize(isolate_->heap(), chunk, executable, owner); | 534 return Page::Initialize(isolate_->heap(), chunk, executable, owner); |
| 620 } | 535 } |
| 621 | 536 |
| 622 | 537 |
| 623 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 538 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| 624 Executability executable, | 539 Executability executable, |
| 625 Space* owner) { | 540 Space* owner) { |
| 626 MemoryChunk* chunk = | 541 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); |
| 627 AllocateChunk(object_size, object_size, executable, owner); | |
| 628 if (chunk == NULL) return NULL; | 542 if (chunk == NULL) return NULL; |
| 629 return LargePage::Initialize(isolate_->heap(), chunk); | 543 return LargePage::Initialize(isolate_->heap(), chunk); |
| 630 } | 544 } |
| 631 | 545 |
| 632 | 546 |
| 633 void MemoryAllocator::Free(MemoryChunk* chunk) { | 547 void MemoryAllocator::Free(MemoryChunk* chunk) { |
| 634 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 548 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| 635 if (chunk->owner() != NULL) { | 549 if (chunk->owner() != NULL) { |
| 636 ObjectSpace space = | 550 ObjectSpace space = |
| 637 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | 551 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
| 638 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); | 552 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); |
| 639 } | 553 } |
| 640 | 554 |
| 641 delete chunk->slots_buffer(); | 555 delete chunk->slots_buffer(); |
| 642 delete chunk->skip_list(); | 556 delete chunk->skip_list(); |
| 643 | 557 |
| 644 VirtualMemory* reservation = chunk->reserved_memory(); | 558 VirtualMemory* reservation = chunk->reserved_memory(); |
| 645 if (reservation->IsReserved()) { | 559 if (reservation->IsReserved()) { |
| 646 FreeMemory(reservation, chunk->executable()); | 560 FreeMemory(reservation, chunk->executable()); |
| 647 } else { | 561 } else { |
| 648 // When we do not have a reservation that is because this allocation | |
| 649 // is part of the huge reserved chunk of memory reserved for code on | |
| 650 // x64. In that case the size was rounded up to the page size on | |
| 651 // allocation so we do the same now when freeing. | |
| 652 FreeMemory(chunk->address(), | 562 FreeMemory(chunk->address(), |
| 653 RoundUp(chunk->size(), Page::kPageSize), | 563 chunk->size(), |
| 654 chunk->executable()); | 564 chunk->executable()); |
| 655 } | 565 } |
| 656 } | 566 } |
| 657 | 567 |
| 658 | 568 |
| 659 bool MemoryAllocator::CommitBlock(Address start, | 569 bool MemoryAllocator::CommitBlock(Address start, |
| 660 size_t size, | 570 size_t size, |
| 661 Executability executable) { | 571 Executability executable) { |
| 662 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; | 572 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; |
| 663 #ifdef DEBUG | 573 #ifdef DEBUG |
| (...skipping 59 matching lines...) |
| 723 memory_allocation_callbacks_.Remove(i); | 633 memory_allocation_callbacks_.Remove(i); |
| 724 return; | 634 return; |
| 725 } | 635 } |
| 726 } | 636 } |
| 727 UNREACHABLE(); | 637 UNREACHABLE(); |
| 728 } | 638 } |
| 729 | 639 |
| 730 | 640 |
| 731 #ifdef DEBUG | 641 #ifdef DEBUG |
| 732 void MemoryAllocator::ReportStatistics() { | 642 void MemoryAllocator::ReportStatistics() { |
| 733 float pct = | 643 float pct = static_cast<float>(capacity_ - size_) / capacity_; |
| 734 static_cast<float>(capacity_ - memory_allocator_reserved_) / capacity_; | |
| 735 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 644 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 736 ", used: %" V8_PTR_PREFIX "d" | 645 ", used: %" V8_PTR_PREFIX "d" |
| 737 ", available: %%%d\n\n", | 646 ", available: %%%d\n\n", |
| 738 capacity_, memory_allocator_reserved_, static_cast<int>(pct*100)); | 647 capacity_, size_, static_cast<int>(pct*100)); |
| 739 } | 648 } |
| 740 #endif | 649 #endif |
| 741 | 650 |
| 742 // ----------------------------------------------------------------------------- | 651 // ----------------------------------------------------------------------------- |
| 743 // MemoryChunk implementation | 652 // MemoryChunk implementation |
| 744 | 653 |
| 745 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) { | 654 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) { |
| 746 MemoryChunk* chunk = MemoryChunk::FromAddress(address); | 655 MemoryChunk* chunk = MemoryChunk::FromAddress(address); |
| 747 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { | 656 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { |
| 748 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by); | 657 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by); |
| (...skipping 58 matching lines...) |
| 807 Address next = cur + obj->Size(); | 716 Address next = cur + obj->Size(); |
| 808 if ((cur <= addr) && (addr < next)) return obj; | 717 if ((cur <= addr) && (addr < next)) return obj; |
| 809 } | 718 } |
| 810 | 719 |
| 811 UNREACHABLE(); | 720 UNREACHABLE(); |
| 812 return Failure::Exception(); | 721 return Failure::Exception(); |
| 813 } | 722 } |
| 814 | 723 |
| 815 bool PagedSpace::CanExpand() { | 724 bool PagedSpace::CanExpand() { |
| 816 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); | 725 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); |
| | 726   ASSERT(Capacity() % Page::kObjectAreaSize == 0); |
| 817 | 727 |
| 818 if (Capacity() == max_capacity_) return false; | 728 if (Capacity() == max_capacity_) return false; |
| 819 | 729 |
| 820 ASSERT(Capacity() < max_capacity_); | 730 ASSERT(Capacity() < max_capacity_); |
| 821 | 731 |
| 822 // Are we going to exceed capacity for this space? | 732 // Are we going to exceed capacity for this space? |
| 823 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; | 733 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; |
| 824 | 734 |
| 825 return true; | 735 return true; |
| 826 } | 736 } |
| 827 | 737 |
| 828 bool PagedSpace::Expand(intptr_t size_in_bytes) { | 738 bool PagedSpace::Expand() { |
| 829 if (!CanExpand()) return false; | 739 if (!CanExpand()) return false; |
| 830 | 740 |
| 831 Page* last_page = anchor_.prev_page(); | |
| 832 if (last_page != &anchor_) { | |
| 833 // We have run out of linear allocation space. This may be because the | |
| 834 // most recently allocated page (stored last in the list) is a small one, | |
| 835 // that starts on a page aligned boundary, but has not a full kPageSize of | |
| 836 // committed memory. Let's commit more memory for the page. | |
| 837 intptr_t reserved_page_size = last_page->reserved_memory()->IsReserved() ? | |
| 838 last_page->reserved_memory()->size() : | |
| 839 Page::kPageSize; | |
| 840 if (last_page->size() < reserved_page_size && | |
| 841 (reserved_page_size - last_page->size()) >= size_in_bytes && | |
| 842 !last_page->IsEvacuationCandidate() && | |
| 843 last_page->WasSwept()) { | |
| 844 last_page->CommitMore(size_in_bytes); | |
| 845 return true; | |
| 846 } | |
| 847 } | |
| 848 | |
| 849 // We initially only commit a part of the page. The deserialization of the | |
| 850 // boot snapshot relies on the fact that the allocation area is linear, but | |
| 851 // that is assured, as this page will be expanded as needed. | |
| 852 int initial = static_cast<int>( | |
| 853 Max(OS::CommitPageSize(), static_cast<intptr_t>(Page::kGrowthUnit))); | |
| 854 | |
| 855 ASSERT(Page::kPageSize - initial < Page::kObjectAreaSize); | |
| 856 | |
| 857 intptr_t expansion_size = | |
| 858 Max(initial, | |
| 859 RoundUpToPowerOf2(MemoryChunk::kObjectStartOffset + size_in_bytes)) - | |
| 860 MemoryChunk::kObjectStartOffset; | |
| 861 | |
| 862 Page* p = heap()->isolate()->memory_allocator()-> | 741 Page* p = heap()->isolate()->memory_allocator()-> |
| 863 AllocatePage(expansion_size, this, executable()); | 742 AllocatePage(this, executable()); |
| 864 if (p == NULL) return false; | 743 if (p == NULL) return false; |
| 865 | 744 |
| 866 ASSERT(Capacity() <= max_capacity_); | 745 ASSERT(Capacity() <= max_capacity_); |
| 867 | 746 |
| 868 p->InsertAfter(anchor_.prev_page()); | 747 p->InsertAfter(anchor_.prev_page()); |
| 869 | 748 |
| 870 return true; | 749 return true; |
| 871 } | 750 } |
| 872 | 751 |
| 873 | 752 |
| (...skipping 24 matching lines...) |
| 898 accounting_stats_.AllocateBytes(size); | 777 accounting_stats_.AllocateBytes(size); |
| 899 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size)); | 778 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size)); |
| 900 } else { | 779 } else { |
| 901 DecreaseUnsweptFreeBytes(page); | 780 DecreaseUnsweptFreeBytes(page); |
| 902 } | 781 } |
| 903 | 782 |
| 904 if (Page::FromAllocationTop(allocation_info_.top) == page) { | 783 if (Page::FromAllocationTop(allocation_info_.top) == page) { |
| 905 allocation_info_.top = allocation_info_.limit = NULL; | 784 allocation_info_.top = allocation_info_.limit = NULL; |
| 906 } | 785 } |
| 907 | 786 |
| 908 intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart(); | |
| 909 | |
| 910 page->Unlink(); | 787 page->Unlink(); |
| 911 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { | 788 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { |
| 912 heap()->isolate()->memory_allocator()->Free(page); | 789 heap()->isolate()->memory_allocator()->Free(page); |
| 913 } else { | 790 } else { |
| 914 heap()->QueueMemoryChunkForFree(page); | 791 heap()->QueueMemoryChunkForFree(page); |
| 915 } | 792 } |
| 916 | 793 |
| 917 ASSERT(Capacity() > 0); | 794 ASSERT(Capacity() > 0); |
| 918 accounting_stats_.ShrinkSpace(static_cast<int>(size)); | 795 ASSERT(Capacity() % Page::kObjectAreaSize == 0); |
| | 796   accounting_stats_.ShrinkSpace(Page::kObjectAreaSize); |
| 919 } | 797 } |
| 920 | 798 |
| 921 | 799 |
| 922 void PagedSpace::ReleaseAllUnusedPages() { | 800 void PagedSpace::ReleaseAllUnusedPages() { |
| 923 PageIterator it(this); | 801 PageIterator it(this); |
| 924 while (it.has_next()) { | 802 while (it.has_next()) { |
| 925 Page* page = it.next(); | 803 Page* page = it.next(); |
| 926 if (!page->WasSwept()) { | 804 if (!page->WasSwept()) { |
| 927 if (page->LiveBytes() == 0) ReleasePage(page); | 805 if (page->LiveBytes() == 0) ReleasePage(page); |
| 928 } else { | 806 } else { |
| (...skipping 857 matching lines...) |
| 1786 // Free lists for old object spaces implementation | 1664 // Free lists for old object spaces implementation |
| 1787 | 1665 |
| 1788 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { | 1666 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { |
| 1789 ASSERT(size_in_bytes > 0); | 1667 ASSERT(size_in_bytes > 0); |
| 1790 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1668 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
| 1791 | 1669 |
| 1792 // We write a map and possibly size information to the block. If the block | 1670 // We write a map and possibly size information to the block. If the block |
| 1793 // is big enough to be a FreeSpace with at least one extra word (the next | 1671 // is big enough to be a FreeSpace with at least one extra word (the next |
| 1794 // pointer), we set its map to be the free space map and its size to an | 1672 // pointer), we set its map to be the free space map and its size to an |
| 1795 // appropriate array length for the desired size from HeapObject::Size(). | 1673 // appropriate array length for the desired size from HeapObject::Size(). |
| 1796 // If the block is too small (e.g. one or two words), to hold both a size | 1674 // If the block is too small (eg, one or two words), to hold both a size |
| 1797 // field and a next pointer, we give it a filler map that gives it the | 1675 // field and a next pointer, we give it a filler map that gives it the |
| 1798 // correct size. | 1676 // correct size. |
| 1799 if (size_in_bytes > FreeSpace::kHeaderSize) { | 1677 if (size_in_bytes > FreeSpace::kHeaderSize) { |
| 1800 set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); | 1678 set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); |
| 1801 // Can't use FreeSpace::cast because it fails during deserialization. | 1679 // Can't use FreeSpace::cast because it fails during deserialization. |
| 1802 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); | 1680 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); |
| 1803 this_as_free_space->set_size(size_in_bytes); | 1681 this_as_free_space->set_size(size_in_bytes); |
| 1804 } else if (size_in_bytes == kPointerSize) { | 1682 } else if (size_in_bytes == kPointerSize) { |
| 1805 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); | 1683 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); |
| 1806 } else if (size_in_bytes == 2 * kPointerSize) { | 1684 } else if (size_in_bytes == 2 * kPointerSize) { |
| (...skipping 83 matching lines...) |
| 1890 } else { | 1768 } else { |
| 1891 node->set_next(huge_list_); | 1769 node->set_next(huge_list_); |
| 1892 huge_list_ = node; | 1770 huge_list_ = node; |
| 1893 } | 1771 } |
| 1894 available_ += size_in_bytes; | 1772 available_ += size_in_bytes; |
| 1895 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | 1773 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
| 1896 return 0; | 1774 return 0; |
| 1897 } | 1775 } |
| 1898 | 1776 |
| 1899 | 1777 |
| 1900 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, | 1778 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { |
| 1901 int* node_size, | |
| 1902 int minimum_size) { | |
| 1903 FreeListNode* node = *list; | 1779 FreeListNode* node = *list; |
| 1904 | 1780 |
| 1905 if (node == NULL) return NULL; | 1781 if (node == NULL) return NULL; |
| 1906 | 1782 |
| 1907 ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map()); | |
| 1908 | |
| 1909 while (node != NULL && | 1783 while (node != NULL && |
| 1910 Page::FromAddress(node->address())->IsEvacuationCandidate()) { | 1784 Page::FromAddress(node->address())->IsEvacuationCandidate()) { |
| 1911 available_ -= node->Size(); | 1785 available_ -= node->Size(); |
| 1912 node = node->next(); | 1786 node = node->next(); |
| 1913 } | 1787 } |
| 1914 | 1788 |
| 1915 if (node == NULL) { | 1789 if (node != NULL) { |
| | 1790     *node_size = node->Size(); |
| | 1791     *list = node->next(); |
| | 1792   } else { |
| 1916 *list = NULL; | 1793 *list = NULL; |
| 1917 return NULL; | |
| 1918 } | 1794 } |
| 1919 | 1795 |
| 1920 // Gets the size without checking the map. When we are booting we have | |
| 1921 // a FreeListNode before we have created its map. | |
| 1922 intptr_t size = reinterpret_cast<FreeSpace*>(node)->Size(); | |
| 1923 | |
| 1924 // We don't search the list for one that fits, preferring to look in the | |
| 1925 // list of larger nodes, but we do check the first in the list, because | |
| 1926 // if we had to expand the space or page we may have placed an entry that | |
| 1927 // was just long enough at the head of one of the lists. | |
| 1928 if (size < minimum_size) { | |
| 1929 *list = node; | |
| 1930 return NULL; | |
| 1931 } | |
| 1932 | |
| 1933 *node_size = static_cast<int>(size); | |
| 1934 available_ -= static_cast<int>(size); | |
| 1935 *list = node->next(); | |
| 1936 | |
| 1937 return node; | 1796 return node; |
| 1938 } | 1797 } |
| 1939 | 1798 |
| 1940 | 1799 |
| 1941 FreeListNode* FreeList::FindAbuttingNode( | 1800 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { |
| 1942 int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head) { | |
| 1943 FreeListNode* first_node = *list_head; | |
| 1944 if (first_node != NULL && | |
| 1945 first_node->address() == limit && | |
| 1946 reinterpret_cast<FreeSpace*>(first_node)->Size() >= size_in_bytes && | |
| 1947 !Page::FromAddress(first_node->address())->IsEvacuationCandidate()) { | |
| 1948 FreeListNode* answer = first_node; | |
| 1949 int size = reinterpret_cast<FreeSpace*>(first_node)->Size(); | |
| 1950 available_ -= size; | |
| 1951 *node_size = size; | |
| 1952 *list_head = first_node->next(); | |
| 1953 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | |
| 1954 return answer; | |
| 1955 } | |
| 1956 return NULL; | |
| 1957 } | |
| 1958 | |
| 1959 | |
| 1960 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, | |
| 1961 int* node_size, | |
| 1962 Address limit) { | |
| 1963 FreeListNode* node = NULL; | 1801 FreeListNode* node = NULL; |
| 1964 | 1802 |
| 1965 if (limit != NULL) { | 1803 if (size_in_bytes <= kSmallAllocationMax) { |
| 1966 // We may have a memory area at the head of the free list, which abuts the | 1804 node = PickNodeFromList(&small_list_, node_size); |
| 1967 // old linear allocation area. This happens if the linear allocation area | |
| 1968 // has been shortened to allow an incremental marking step to be performed. | |
| 1969 // In that case we prefer to return the free memory area that is contiguous | |
| 1970 // with the old linear allocation area. | |
| 1971 node = FindAbuttingNode(size_in_bytes, node_size, limit, &large_list_); | |
| 1972 if (node != NULL) return node; | |
| 1973 node = FindAbuttingNode(size_in_bytes, node_size, limit, &huge_list_); | |
| 1974 if (node != NULL) return node; | 1805 if (node != NULL) return node; |
| 1975 } | 1806 } |
| 1976 | 1807 |
| 1977 node = PickNodeFromList(&small_list_, node_size, size_in_bytes); | 1808 if (size_in_bytes <= kMediumAllocationMax) { |
| 1978 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | 1809 node = PickNodeFromList(&medium_list_, node_size); |
| 1979 if (node != NULL) return node; | |
| 1980 | |
| 1981 node = PickNodeFromList(&medium_list_, node_size, size_in_bytes); | |
| 1982 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | |
| 1983 if (node != NULL) return node; | |
| 1984 | |
| 1985 node = PickNodeFromList(&large_list_, node_size, size_in_bytes); | |
| 1986 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | |
| 1987 if (node != NULL) return node; | |
| 1988 | |
| 1989 // The tricky third clause in this for statement is due to the fact that | |
| 1990 // PickNodeFromList can cut pages out of the list if they are unavailable for | |
| 1991 // new allocation (e.g. if they are on a page that has been scheduled for | |
| 1992 // evacuation). | |
| 1993 for (FreeListNode** cur = &huge_list_; | |
| 1994 *cur != NULL; | |
| 1995 cur = (*cur) == NULL ? cur : (*cur)->next_address()) { | |
| 1996 node = PickNodeFromList(cur, node_size, size_in_bytes); | |
| 1997 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | |
| 1998 if (node != NULL) return node; | 1810 if (node != NULL) return node; |
| 1999 } | 1811 } |
| 2000 | 1812 |
| | 1813   if (size_in_bytes <= kLargeAllocationMax) { |
| | 1814     node = PickNodeFromList(&large_list_, node_size); |
| | 1815     if (node != NULL) return node; |
| | 1816   } |
| | 1817 |
| | 1818   for (FreeListNode** cur = &huge_list_; |
| | 1819        *cur != NULL; |
| | 1820        cur = (*cur)->next_address()) { |
| | 1821     FreeListNode* cur_node = *cur; |
| | 1822     while (cur_node != NULL && |
| | 1823            Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { |
| | 1824       available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size(); |
| | 1825       cur_node = cur_node->next(); |
| | 1826     } |
| | 1827 |
| | 1828     *cur = cur_node; |
| | 1829     if (cur_node == NULL) break; |
| | 1830 |
| | 1831     ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); |
| | 1832     FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); |
| | 1833     int size = cur_as_free_space->Size(); |
| | 1834     if (size >= size_in_bytes) { |
| | 1835       // Large enough node found. Unlink it from the list. |
| | 1836       node = *cur; |
| | 1837       *node_size = size; |
| | 1838       *cur = node->next(); |
| | 1839       break; |
| | 1840     } |
| | 1841   } |
| | 1842 |
| 2001 return node; | 1843 return node; |
| 2002 } | 1844 } |
| 2003 | 1845 |
| 2004 | 1846 |
| 2005 // Allocation on the old space free list. If it succeeds then a new linear | 1847 // Allocation on the old space free list. If it succeeds then a new linear |
| 2006 // allocation space has been set up with the top and limit of the space. If | 1848 // allocation space has been set up with the top and limit of the space. If |
| 2007 // the allocation fails then NULL is returned, and the caller can perform a GC | 1849 // the allocation fails then NULL is returned, and the caller can perform a GC |
| 2008 // or allocate a new page before retrying. | 1850 // or allocate a new page before retrying. |
| 2009 HeapObject* FreeList::Allocate(int size_in_bytes) { | 1851 HeapObject* FreeList::Allocate(int size_in_bytes) { |
| 2010 ASSERT(0 < size_in_bytes); | 1852 ASSERT(0 < size_in_bytes); |
| 2011 ASSERT(size_in_bytes <= kMaxBlockSize); | 1853 ASSERT(size_in_bytes <= kMaxBlockSize); |
| 2012 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1854 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
| 2013 // Don't free list allocate if there is linear space available. | 1855 // Don't free list allocate if there is linear space available. |
| 2014 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); | 1856 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); |
| 2015 | 1857 |
| 2016 int new_node_size = 0; | 1858 int new_node_size = 0; |
| 2017 FreeListNode* new_node = | 1859 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); |
| 2018 FindNodeFor(size_in_bytes, &new_node_size, owner_->limit()); | |
| 2019 if (new_node == NULL) return NULL; | 1860 if (new_node == NULL) return NULL; |
| 2020 | 1861 |
| 2021 if (new_node->address() == owner_->limit()) { | 1862 available_ -= new_node_size; |
| 2022 // The new freelist node we were given is an extension of the one we had | |
| 2023 // last. This is a common thing to happen when we extend a small page by | |
| 2024 // committing more memory. In this case we just add the new node to the | |
| 2025 // linear allocation area and recurse. | |
| 2026 owner_->Allocate(new_node_size); | |
| 2027 owner_->SetTop(owner_->top(), new_node->address() + new_node_size); | |
| 2028 MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes); | |
| 2029 Object* answer; | |
| 2030 if (!allocation->ToObject(&answer)) return NULL; | |
| 2031 return HeapObject::cast(answer); | |
| 2032 } | |
| 2033 | |
| 2034 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | 1863 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
| 2035 | 1864 |
| 2036 int bytes_left = new_node_size - size_in_bytes; | 1865 int bytes_left = new_node_size - size_in_bytes; |
| 2037 ASSERT(bytes_left >= 0); | 1866 ASSERT(bytes_left >= 0); |
| 2038 | 1867 |
| 2039 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); | 1868 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); |
| 2040 // Mark the old linear allocation area with a free space map so it can be | 1869 // Mark the old linear allocation area with a free space map so it can be |
| 2041 // skipped when scanning the heap. This also puts it back in the free list | 1870 // skipped when scanning the heap. This also puts it back in the free list |
| 2042 // if it is big enough. | 1871 // if it is big enough. |
| 2043 if (old_linear_size != 0) { | 1872 owner_->Free(owner_->top(), old_linear_size); |
| 2044 owner_->AddToFreeLists(owner_->top(), old_linear_size); | |
| 2045 } | |
| 2046 | 1873 |
| 2047 #ifdef DEBUG | 1874 #ifdef DEBUG |
| 2048 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { | 1875 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { |
| 2049 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0); | 1876 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0); |
| 2050 } | 1877 } |
| 2051 #endif | 1878 #endif |
| 2052 | 1879 |
| 2053 owner_->heap()->incremental_marking()->OldSpaceStep( | 1880 owner_->heap()->incremental_marking()->OldSpaceStep( |
| 2054 size_in_bytes - old_linear_size); | 1881 size_in_bytes - old_linear_size); |
| 2055 | 1882 |
| 2056 // The old-space-step might have finished sweeping and restarted marking. | 1883 // The old-space-step might have finished sweeping and restarted marking. |
| 2057 // Verify that it did not turn the page of the new node into an evacuation | 1884 // Verify that it did not turn the page of the new node into an evacuation |
| 2058 // candidate. | 1885 // candidate. |
| 2059 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); | 1886 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); |
| 2060 | 1887 |
| 2061 const int kThreshold = IncrementalMarking::kAllocatedThreshold; | 1888 const int kThreshold = IncrementalMarking::kAllocatedThreshold; |
| 2062 | 1889 |
| 2063 // Memory in the linear allocation area is counted as allocated. We may free | 1890 // Memory in the linear allocation area is counted as allocated. We may free |
| 2064 // a little of this again immediately - see below. | 1891 // a little of this again immediately - see below. |
| 2065 owner_->Allocate(new_node_size); | 1892 owner_->Allocate(new_node_size); |
| 2066 | 1893 |
| 2067 if (bytes_left > kThreshold && | 1894 if (bytes_left > kThreshold && |
| 2068 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && | 1895 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && |
| 2069 FLAG_incremental_marking_steps) { | 1896 FLAG_incremental_marking_steps) { |
| 2070 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); | 1897 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); |
| 2071 // We don't want to give too large linear areas to the allocator while | 1898 // We don't want to give too large linear areas to the allocator while |
| 2072 // incremental marking is going on, because we won't check again whether | 1899 // incremental marking is going on, because we won't check again whether |
| 2073 // we want to do another increment until the linear area is used up. | 1900 // we want to do another increment until the linear area is used up. |
| 2074 owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size, | 1901 owner_->Free(new_node->address() + size_in_bytes + linear_size, |
| 2075 new_node_size - size_in_bytes - linear_size); | 1902 new_node_size - size_in_bytes - linear_size); |
| 2076 owner_->SetTop(new_node->address() + size_in_bytes, | 1903 owner_->SetTop(new_node->address() + size_in_bytes, |
| 2077 new_node->address() + size_in_bytes + linear_size); | 1904 new_node->address() + size_in_bytes + linear_size); |
| 2078 } else if (bytes_left > 0) { | 1905 } else if (bytes_left > 0) { |
| 2079 // Normally we give the rest of the node to the allocator as its new | 1906 // Normally we give the rest of the node to the allocator as its new |
| 2080 // linear allocation area. | 1907 // linear allocation area. |
| 2081 owner_->SetTop(new_node->address() + size_in_bytes, | 1908 owner_->SetTop(new_node->address() + size_in_bytes, |
| 2082 new_node->address() + new_node_size); | 1909 new_node->address() + new_node_size); |
| 2083 } else { | 1910 } else { |
| 2084 ASSERT(bytes_left == 0); | |
| 2085 // TODO(gc) Try not freeing linear allocation region when bytes_left | 1911 // TODO(gc) Try not freeing linear allocation region when bytes_left |
| 2086 // are zero. | 1912 // are zero. |
| 2087 owner_->SetTop(NULL, NULL); | 1913 owner_->SetTop(NULL, NULL); |
| 2088 } | 1914 } |
| 2089 | 1915 |
| 2090 return new_node; | 1916 return new_node; |
| 2091 } | 1917 } |
| 2092 | 1918 |
| 2093 | 1919 |
| 2094 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { | 1920 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { |
| (...skipping 112 matching lines...) |
| 2207 // or because we have lowered the limit in order to get periodic incremental | 2033 // or because we have lowered the limit in order to get periodic incremental |
| 2208 // marking. The most reliable way to ensure that there is linear space is | 2034 // marking. The most reliable way to ensure that there is linear space is |
| 2209 // to do the allocation, then rewind the limit. | 2035 // to do the allocation, then rewind the limit. |
| 2210 ASSERT(bytes <= InitialCapacity()); | 2036 ASSERT(bytes <= InitialCapacity()); |
| 2211 MaybeObject* maybe = AllocateRaw(bytes); | 2037 MaybeObject* maybe = AllocateRaw(bytes); |
| 2212 Object* object = NULL; | 2038 Object* object = NULL; |
| 2213 if (!maybe->ToObject(&object)) return false; | 2039 if (!maybe->ToObject(&object)) return false; |
| 2214 HeapObject* allocation = HeapObject::cast(object); | 2040 HeapObject* allocation = HeapObject::cast(object); |
| 2215 Address top = allocation_info_.top; | 2041 Address top = allocation_info_.top; |
| 2216 if ((top - bytes) == allocation->address()) { | 2042 if ((top - bytes) == allocation->address()) { |
| 2217 Address new_top = allocation->address(); | 2043 allocation_info_.top = allocation->address(); |
| 2218 ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart()); | |
| 2219 allocation_info_.top = new_top; | |
| 2220 return true; | 2044 return true; |
| 2221 } | 2045 } |
| 2222 // There may be a borderline case here where the allocation succeeded, but | 2046 // There may be a borderline case here where the allocation succeeded, but |
| 2223 // the limit and top have moved on to a new page. In that case we try again. | 2047 // the limit and top have moved on to a new page. In that case we try again. |
| 2224 return ReserveSpace(bytes); | 2048 return ReserveSpace(bytes); |
| 2225 } | 2049 } |
| 2226 | 2050 |
| 2227 | 2051 |
| 2228 void PagedSpace::PrepareForMarkCompact() { | 2052 void PagedSpace::PrepareForMarkCompact() { |
| 2229 // We don't have a linear allocation area while sweeping. It will be restored | 2053 // We don't have a linear allocation area while sweeping. It will be restored |
| 2230 // on the first allocation after the sweep. | 2054 // on the first allocation after the sweep. |
| 2231 // Mark the old linear allocation area with a free space map so it can be | 2055 // Mark the old linear allocation area with a free space map so it can be |
| 2232 // skipped when scanning the heap. | 2056 // skipped when scanning the heap. |
| 2233 int old_linear_size = static_cast<int>(limit() - top()); | 2057 int old_linear_size = static_cast<int>(limit() - top()); |
| 2234 AddToFreeLists(top(), old_linear_size); | 2058 Free(top(), old_linear_size); |
| 2235 SetTop(NULL, NULL); | 2059 SetTop(NULL, NULL); |
| 2236 | 2060 |
| 2237 // Stop lazy sweeping and clear marking bits for unswept pages. | 2061 // Stop lazy sweeping and clear marking bits for unswept pages. |
| 2238 if (first_unswept_page_ != NULL) { | 2062 if (first_unswept_page_ != NULL) { |
| 2239 Page* p = first_unswept_page_; | 2063 Page* p = first_unswept_page_; |
| 2240 do { | 2064 do { |
| 2241 // Do not use ShouldBeSweptLazily predicate here. | 2065 // Do not use ShouldBeSweptLazily predicate here. |
| 2242 // New evacuation candidates were selected but they still have | 2066 // New evacuation candidates were selected but they still have |
| 2243 // to be swept before collection starts. | 2067 // to be swept before collection starts. |
| 2244 if (!p->WasSwept()) { | 2068 if (!p->WasSwept()) { |
| (...skipping 22 matching lines...) Expand all Loading... |
| 2267 if (new_top <= allocation_info_.limit) return true; | 2091 if (new_top <= allocation_info_.limit) return true; |
| 2268 | 2092 |
| 2269 HeapObject* new_area = free_list_.Allocate(size_in_bytes); | 2093 HeapObject* new_area = free_list_.Allocate(size_in_bytes); |
| 2270 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); | 2094 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); |
| 2271 if (new_area == NULL) return false; | 2095 if (new_area == NULL) return false; |
| 2272 | 2096 |
| 2273 int old_linear_size = static_cast<int>(limit() - top()); | 2097 int old_linear_size = static_cast<int>(limit() - top()); |
| 2274 // Mark the old linear allocation area with a free space so it can be | 2098 // Mark the old linear allocation area with a free space so it can be |
| 2275 // skipped when scanning the heap. This also puts it back in the free list | 2099 // skipped when scanning the heap. This also puts it back in the free list |
| 2276 // if it is big enough. | 2100 // if it is big enough. |
| 2277 AddToFreeLists(top(), old_linear_size); | 2101 Free(top(), old_linear_size); |
| 2278 | 2102 |
| 2279 SetTop(new_area->address(), new_area->address() + size_in_bytes); | 2103 SetTop(new_area->address(), new_area->address() + size_in_bytes); |
| 2280 // The AddToFreeLists call above will reduce the size of the space in the | 2104 Allocate(size_in_bytes); |
| 2281 // allocation stats. We don't need to add this linear area to the size | |
| 2282 // with an Allocate(size_in_bytes) call here, because the | |
| 2283 // free_list_.Allocate() call above already accounted for this memory. | |
| 2284 return true; | 2105 return true; |
| 2285 } | 2106 } |
| 2286 | 2107 |
| 2287 | 2108 |
| 2288 // You have to call this last, since the implementation from PagedSpace | 2109 // You have to call this last, since the implementation from PagedSpace |
| 2289 // doesn't know that memory was 'promised' to large object space. | 2110 // doesn't know that memory was 'promised' to large object space. |
| 2290 bool LargeObjectSpace::ReserveSpace(int bytes) { | 2111 bool LargeObjectSpace::ReserveSpace(int bytes) { |
| 2291 return heap()->OldGenerationSpaceAvailable() >= bytes; | 2112 return heap()->OldGenerationSpaceAvailable() >= bytes; |
| 2292 } | 2113 } |
| 2293 | 2114 |
| (...skipping 60 matching lines...) |
| 2354 | 2175 |
| 2355 // Free list allocation failed and there is no next page. Fail if we have | 2176 // Free list allocation failed and there is no next page. Fail if we have |
| 2356 // hit the old generation size limit that should cause a garbage | 2177 // hit the old generation size limit that should cause a garbage |
| 2357 // collection. | 2178 // collection. |
| 2358 if (!heap()->always_allocate() && | 2179 if (!heap()->always_allocate() && |
| 2359 heap()->OldGenerationAllocationLimitReached()) { | 2180 heap()->OldGenerationAllocationLimitReached()) { |
| 2360 return NULL; | 2181 return NULL; |
| 2361 } | 2182 } |
| 2362 | 2183 |
| 2363 // Try to expand the space and allocate in the new next page. | 2184 // Try to expand the space and allocate in the new next page. |
| 2364 if (Expand(size_in_bytes)) { | 2185 if (Expand()) { |
| 2365 return free_list_.Allocate(size_in_bytes); | 2186 return free_list_.Allocate(size_in_bytes); |
| 2366 } | 2187 } |
| 2367 | 2188 |
| 2368 // Last ditch, sweep all the remaining pages to try to find space. This may | 2189 // Last ditch, sweep all the remaining pages to try to find space. This may |
| 2369 // cause a pause. | 2190 // cause a pause. |
| 2370 if (!IsSweepingComplete()) { | 2191 if (!IsSweepingComplete()) { |
| 2371 AdvanceSweeper(kMaxInt); | 2192 AdvanceSweeper(kMaxInt); |
| 2372 | 2193 |
| 2373 // Retry the free list allocation. | 2194 // Retry the free list allocation. |
| 2374 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2195 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| (...skipping 340 matching lines...) |
| 2715 if (previous == NULL) { | 2536 if (previous == NULL) { |
| 2716 first_page_ = current; | 2537 first_page_ = current; |
| 2717 } else { | 2538 } else { |
| 2718 previous->set_next_page(current); | 2539 previous->set_next_page(current); |
| 2719 } | 2540 } |
| 2720 | 2541 |
| 2721 // Free the chunk. | 2542 // Free the chunk. |
| 2722 heap()->mark_compact_collector()->ReportDeleteIfNeeded( | 2543 heap()->mark_compact_collector()->ReportDeleteIfNeeded( |
| 2723 object, heap()->isolate()); | 2544 object, heap()->isolate()); |
| 2724 size_ -= static_cast<int>(page->size()); | 2545 size_ -= static_cast<int>(page->size()); |
| 2725 ASSERT(size_ >= 0); | |
| 2726 objects_size_ -= object->Size(); | 2546 objects_size_ -= object->Size(); |
| 2727 page_count_--; | 2547 page_count_--; |
| 2728 | 2548 |
| 2729 if (is_pointer_object) { | 2549 if (is_pointer_object) { |
| 2730 heap()->QueueMemoryChunkForFree(page); | 2550 heap()->QueueMemoryChunkForFree(page); |
| 2731 } else { | 2551 } else { |
| 2732 heap()->isolate()->memory_allocator()->Free(page); | 2552 heap()->isolate()->memory_allocator()->Free(page); |
| 2733 } | 2553 } |
| 2734 } | 2554 } |
| 2735 } | 2555 } |
| (...skipping 118 matching lines...) |
| 2854 object->ShortPrint(); | 2674 object->ShortPrint(); |
| 2855 PrintF("\n"); | 2675 PrintF("\n"); |
| 2856 } | 2676 } |
| 2857 printf(" --------------------------------------\n"); | 2677 printf(" --------------------------------------\n"); |
| 2858 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 2678 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 2859 } | 2679 } |
| 2860 | 2680 |
| 2861 #endif // DEBUG | 2681 #endif // DEBUG |
| 2862 | 2682 |
| 2863 } } // namespace v8::internal | 2683 } } // namespace v8::internal |
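
Note for readers skimming this side-by-side diff: the change restores the original bucketed lookup in FreeList::FindNodeFor, where the small, medium, and large free lists are consulted by size class and only the huge list is scanned first-fit. The sketch below is a minimal, self-contained illustration of that selection strategy, not V8 code; the class name, the thresholds, and the use of std::list are assumptions made so the example stands alone. The point of the bucketing is that a request at or below a bucket's lower bound fits any node stored in that bucket, so those buckets never need to be searched.

```cpp
// Standalone sketch (not V8 code) of bucketed free-list selection, loosely
// modelled on the restored FreeList::FindNodeFor in the diff above.
#include <cstddef>
#include <iostream>
#include <list>

class SketchFreeList {
 public:
  // Each bucket only stores nodes at least as large as the bucket's lower
  // bound, so any request no bigger than that bound fits any node in it.
  void Add(size_t node_size) {
    if (node_size >= kHugeMin) {
      huge_.push_back(node_size);
    } else if (node_size >= kLargeMin) {
      large_.push_back(node_size);
    } else if (node_size >= kMediumMin) {
      medium_.push_back(node_size);
    } else if (node_size >= kSmallMin) {
      small_.push_back(node_size);
    }
    // Nodes smaller than kSmallMin are not worth tracking in this sketch.
  }

  // Returns the size of a removed node that can hold size_in_bytes, or 0.
  // The size-class buckets are tried in order; only the huge list, whose
  // node sizes vary widely, needs a first-fit scan.
  size_t FindNodeFor(size_t size_in_bytes) {
    if (size_in_bytes <= kSmallMin && !small_.empty()) return PopFront(&small_);
    if (size_in_bytes <= kMediumMin && !medium_.empty()) return PopFront(&medium_);
    if (size_in_bytes <= kLargeMin && !large_.empty()) return PopFront(&large_);
    for (std::list<size_t>::iterator it = huge_.begin(); it != huge_.end(); ++it) {
      if (*it >= size_in_bytes) {
        size_t found = *it;
        huge_.erase(it);  // unlink the first node that is big enough
        return found;
      }
    }
    return 0;
  }

 private:
  static size_t PopFront(std::list<size_t>* bucket) {
    size_t front = bucket->front();
    bucket->pop_front();
    return front;
  }

  // Illustrative size-class boundaries, in bytes.
  static const size_t kSmallMin = 0x20;
  static const size_t kMediumMin = 0x100;
  static const size_t kLargeMin = 0x800;
  static const size_t kHugeMin = 0x4000;

  std::list<size_t> small_, medium_, large_, huge_;
};

int main() {
  SketchFreeList free_list;
  free_list.Add(0x80);     // stored in the small bucket (>= 0x20, < 0x100)
  free_list.Add(0x10000);  // stored in the huge list
  std::cout << free_list.FindNodeFor(0x18) << "\n";    // 0x80, from the small bucket
  std::cout << free_list.FindNodeFor(0x9000) << "\n";  // 0x10000, first fit in huge list
  return 0;
}
```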
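
A second sketch, for the tail of FreeList::Allocate in this diff: once a node is found, the requested bytes are carved off its front, and the leftover either becomes the new linear allocation area or, while incremental marking is unfinished, is trimmed to a small slice with the excess freed again. Everything below is simplified and uses made-up names (`CarveResult`, `CarveNode`, `throttle` standing in for the marking threshold); it is not the V8 interface, and the real code also frees the old linear area first, which is omitted here.

```cpp
// Standalone sketch (not V8 code) of how a found free node is split between
// the requested object and the new linear allocation area.
#include <cstddef>
#include <cstdio>

struct CarveResult {
  size_t top;             // start of the new linear allocation area (offset)
  size_t limit;           // end of the new linear allocation area (offset)
  size_t returned_bytes;  // bytes handed straight back to the free list
};

CarveResult CarveNode(size_t node_size, size_t size_in_bytes,
                      size_t throttle, bool marking_incomplete) {
  CarveResult r = {0, 0, 0};
  size_t bytes_left = node_size - size_in_bytes;
  if (marking_incomplete && bytes_left > throttle) {
    // Keep only a small linear area so allocation soon re-enters the slow
    // path and incremental marking gets a chance to do another step.
    r.top = size_in_bytes;
    r.limit = size_in_bytes + throttle;
    r.returned_bytes = bytes_left - throttle;
  } else if (bytes_left > 0) {
    // Hand the whole remainder to the allocator as its linear area.
    r.top = size_in_bytes;
    r.limit = node_size;
  }
  // Otherwise the node fit exactly; there is no linear area (top == limit == 0).
  return r;
}

int main() {
  CarveResult r = CarveNode(4096, 128, 512, /*marking_incomplete=*/true);
  std::printf("object: [0, 128), linear area: [%zu, %zu), returned: %zu bytes\n",
              r.top, r.limit, r.returned_bytes);
  return 0;
}
```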