Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 40063002: Bookkeeping for allocation site pretenuring (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Comment response Created 7 years, 1 month ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 69 matching lines...)
       // Will be 4 * reserved_semispace_size_ to ensure that young
       // generation can be aligned to its size.
       maximum_committed_(0),
       survived_since_last_expansion_(0),
       sweep_generation_(0),
       always_allocate_scope_depth_(0),
       linear_allocation_scope_depth_(0),
       contexts_disposed_(0),
       global_ic_age_(0),
       flush_monomorphic_ics_(false),
-      allocation_mementos_found_(0),
       scan_on_scavenge_pages_(0),
       new_space_(this),
       old_pointer_space_(NULL),
       old_data_space_(NULL),
       code_space_(NULL),
       map_space_(NULL),
       cell_space_(NULL),
       property_cell_space_(NULL),
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
(...skipping 398 matching lines...)
   PagedSpaces spaces(this);
   for (PagedSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
     space->RepairFreeListsAfterBoot();
   }
 }


 void Heap::GarbageCollectionEpilogue() {
+  if (FLAG_allocation_site_pretenuring) {
+    int tenure_decisions = 0;
+    int dont_tenure_decisions = 0;
+    int allocation_mementos_found = 0;
+
+    Object* cur = allocation_sites_list();
+    if (cur->IsAllocationSite()) {

Hannes Payer (out of office) 2013/11/25 11:45:52  I guess the if is not needed.
mvstanton 2013/11/25 13:49:18  Done.

+      while (cur->IsAllocationSite()) {
+        AllocationSite* casted = AllocationSite::cast(cur);
+        allocation_mementos_found += casted->memento_found_count()->value();
+        if (casted->DigestPretenuringFeedback()) {
+          if (casted->GetPretenureMode() == TENURED) {
+            tenure_decisions++;
+          } else {
+            dont_tenure_decisions++;
+          }
+        }
+        cur = casted->weak_next();
+      }
+    }
+
+    // TODO(mvstanton): Pretenure decisions are only made once for an allocation
+    // site. Find a sane way to decide about revisiting the decision later.
+
+    if (FLAG_trace_track_allocation_sites &&
+        (allocation_mementos_found > 0 ||
+         tenure_decisions > 0 ||
+         dont_tenure_decisions > 0)) {
+      PrintF("GC: (#mementos, #tenure decisions, #donttenure decisions) "
+             "(%d, %d, %d)\n",
+             allocation_mementos_found,
+             tenure_decisions,
+             dont_tenure_decisions);
+    }
+  }
+
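The epilogue code added above walks the heap's weak list of AllocationSite objects, sums each site's memento_found_count, and tallies how many sites reached a pretenuring verdict this cycle. A minimal standalone sketch of that traversal follows, using plain C++ stand-ins for V8's tagged Object/AllocationSite types; the threshold inside DigestPretenuringFeedback is illustrative only, not the CL's actual heuristic.

#include <cstdio>

// Simplified stand-in for v8::internal::AllocationSite; the real object
// is heap-allocated and linked through a weak pointer field.
struct AllocationSite {
  int memento_found_count = 0;   // mementos observed behind live objects
  bool tenured = false;          // current pretenuring verdict
  bool decided = false;          // decisions are made only once (see TODO)
  AllocationSite* weak_next = nullptr;

  // Illustrative feedback digestion: decide once enough mementos are seen.
  bool DigestPretenuringFeedback() {
    const int kThreshold = 100;  // hypothetical trigger, not V8's value
    if (decided || memento_found_count < kThreshold) return false;
    decided = true;
    tenured = true;  // a real heuristic weighs found vs. created counts
    return true;
  }
};

// Mirrors the shape of the loop in Heap::GarbageCollectionEpilogue().
void DigestAllocationFeedback(AllocationSite* list_head) {
  int mementos = 0, tenure = 0, dont_tenure = 0;
  for (AllocationSite* cur = list_head; cur != nullptr; cur = cur->weak_next) {
    mementos += cur->memento_found_count;
    if (cur->DigestPretenuringFeedback()) {
      if (cur->tenured) tenure++; else dont_tenure++;
    }
  }
  if (mementos > 0 || tenure > 0 || dont_tenure > 0) {
    std::printf("GC: (#mementos, #tenure decisions, #donttenure decisions) "
                "(%d, %d, %d)\n", mementos, tenure, dont_tenure);
  }
}

As the review exchange above notes, the leading IsAllocationSite() check is subsumed by the while condition, so the walk needs only the loop.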
   store_buffer()->GCEpilogue();

   // In release mode, we only zap the from space under heap verification.
   if (Heap::ShouldZapGarbage()) {
     ZapFromSpace();
   }

 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
(...skipping 867 matching lines...)
   }

  private:
   Heap* heap_;
 };


 void Heap::Scavenge() {
   RelocationLock relocation_lock(this);

-  allocation_mementos_found_ = 0;
-
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
 #endif

   gc_state_ = SCAVENGE;

   // Implements Cheney's copying algorithm
   LOG(isolate_, ResourceEvent("scavenge", "begin"));

   // Clear descriptor cache.
(...skipping 127 matching lines...)

   // Update how much has survived scavenge.
   IncrementYoungSurvivorsCounter(static_cast<int>(
       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));

   LOG(isolate_, ResourceEvent("scavenge", "end"));

   gc_state_ = NOT_IN_GC;

   scavenges_since_last_idle_round_++;
-
-  if (FLAG_trace_track_allocation_sites && allocation_mementos_found_ > 0) {
-    PrintF("AllocationMementos found during scavenge = %d\n",
-           allocation_mementos_found_);
-  }
 }
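The deleted trace above retires the heap-global allocation_mementos_found_ counter, which Scavenge() reset on entry and printed on exit; after this CL each AllocationSite carries its own memento_found_count, aggregated later in the GC epilogue, so feedback accumulates per site rather than per scavenge. A hedged sketch of the new shape, where FoundMementoBehind is a hypothetical hook rather than this file's helper:

// Old scheme (removed above): one heap-global tally, reset every scavenge.
// New scheme: each site keeps its own count, read later in the epilogue.
struct AllocationSite {
  int memento_found_count = 0;
  void IncrementMementoFoundCount() { ++memento_found_count; }
};

// Hypothetical hook invoked when a scavenged object turns out to carry a
// trailing AllocationMemento pointing at `site`.
void FoundMementoBehind(AllocationSite* site) {
  site->IncrementMementoFoundCount();  // per-site, not heap-global
}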


 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                 Object** p) {
   MapWord first_word = HeapObject::cast(*p)->map_word();

   if (!first_word.IsForwardingAddress()) {
     // Unreachable external string can be finalized.
     heap->FinalizeExternalString(String::cast(*p));
(...skipping 2805 matching lines...)
   // space when new space is full and the object is not a large object.
   AllocationSpace retry_space =
       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
   int size = map->instance_size() + AllocationMemento::kSize;
   Object* result;
   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
   if (!maybe_result->ToObject(&result)) return maybe_result;
   // No need for write barrier since object is white and map is in old space.
   HeapObject::cast(result)->set_map_no_write_barrier(map);
   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
       reinterpret_cast<Address>(result) + map->instance_size());

Hannes Payer (out of office) 2013/11/25 11:45:52  Is there a way to increase the memento count at memento creation time?
mvstanton 2013/11/25 13:49:18  I consolidated the two heap.cc memento construction sites.

   alloc_memento->set_map_no_write_barrier(allocation_memento_map());
   ASSERT(allocation_site->map() == allocation_site_map());
   alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
+  if (FLAG_allocation_site_pretenuring) {
+    allocation_site->IncrementMementoCreateCount();
+  }
   return result;
 }


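The path above reserves map->instance_size() + AllocationMemento::kSize in a single raw allocation, constructs the memento directly behind the object, and bumps the owning site's create count when pretenuring is enabled. A self-contained layout sketch, with plain C++ types standing in for V8's tagged objects rather than the CL's actual allocator:

#include <cstddef>
#include <new>

struct AllocationSite {
  int memento_create_count = 0;
  void IncrementMementoCreateCount() { ++memento_create_count; }
};

struct AllocationMemento {
  AllocationSite* site;  // analogous to set_allocation_site(...)
};

// Allocates object + trailing memento in one block, as the code above
// does with AllocateRaw(size, space, retry_space).
void* AllocateWithMemento(std::size_t object_size, AllocationSite* site,
                          bool allocation_site_pretenuring) {
  char* raw = static_cast<char*>(
      ::operator new(object_size + sizeof(AllocationMemento)));
  // The memento lives directly behind the object, at raw + object_size.
  new (raw + object_size) AllocationMemento{site};
  if (allocation_site_pretenuring) {
    site->IncrementMementoCreateCount();  // mirrors the added lines above
  }
  return raw;  // object start; the caller initializes the object itself
}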
 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
   ASSERT(gc_state_ == NOT_IN_GC);
   ASSERT(map->instance_type() != MAP_TYPE);
   // If allocation failures are disallowed, we may allocate in a different
   // space when new space is full and the object is not a large object.
   AllocationSpace retry_space =
(...skipping 412 matching lines...)
 MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
   // Never used to copy functions. If functions need to be copied we
   // have to be careful to clear the literals array.
   SLOW_ASSERT(!source->IsJSFunction());

   // Make the clone.
   Map* map = source->map();
   int object_size = map->instance_size();
   Object* clone;

-  ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) &&
-                          map->instance_type() == JS_ARRAY_TYPE));
+  ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));

   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;

   // If we're forced to always allocate, we use the general allocation
   // functions which may leave us with an object in old space.
   if (always_allocate()) {
     { MaybeObject* maybe_clone =
           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
(...skipping 21 matching lines...)
     CopyBlock(HeapObject::cast(clone)->address(),
               source->address(),
               object_size);

     if (site != NULL) {
       AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
           reinterpret_cast<Address>(clone) + object_size);
       alloc_memento->set_map_no_write_barrier(allocation_memento_map());
       ASSERT(site->map() == allocation_site_map());
       alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+      if (FLAG_allocation_site_pretenuring) {
+        site->IncrementMementoCreateCount();
+      }
       HeapProfiler* profiler = isolate()->heap_profiler();
       if (profiler->is_tracking_allocations()) {
         profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
                                         object_size);
         profiler->NewObjectEvent(alloc_memento->address(),
                                  AllocationMemento::kSize);
       }
     }
   }

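CopyJSObject() is the second memento construction site in this file: when a clone receives a memento and the heap profiler is tracking allocations, the code reports both the clone's size and the newly materialized memento. A minimal sketch of that double notification, with Profiler as a stand-in for V8's HeapProfiler:

#include <cstddef>

// Stand-in for v8::internal::HeapProfiler; the two methods mirror
// UpdateObjectSizeEvent and NewObjectEvent used above.
struct Profiler {
  bool tracking_allocations = true;
  std::size_t bytes_resized = 0;
  std::size_t objects_created = 0;
  void UpdateObjectSizeEvent(const void* /*addr*/, std::size_t size) {
    bytes_resized += size;  // stand-in for real profiler bookkeeping
  }
  void NewObjectEvent(const void* /*addr*/, std::size_t /*size*/) {
    objects_created++;
  }
};

// Notify the profiler about a clone and its trailing memento.
void NotifyCloneWithMemento(Profiler& profiler, const char* clone,
                            std::size_t object_size,
                            std::size_t memento_size) {
  if (!profiler.tracking_allocations) return;
  profiler.UpdateObjectSizeEvent(clone, object_size);          // the clone
  profiler.NewObjectEvent(clone + object_size, memento_size);  // the memento
}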
(...skipping 3091 matching lines...) Expand 10 before | Expand all | Expand 10 after
                      static_cast<int>(object_sizes_last_time_[index]));
   CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
 #undef ADJUST_LAST_TIME_OBJECT_COUNT

   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
   ClearObjectStats();
 }

 } }  // namespace v8::internal