Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 40063002: Bookkeeping for allocation site pretenuring (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebase built on site fields in another CL. Created 7 years, 1 month ago
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
(...skipping 1529 matching lines...)
1540 LOG(isolate_, ResourceEvent("scavenge", "end"));
1541
1542 gc_state_ = NOT_IN_GC;
1543
1544 scavenges_since_last_idle_round_++;
1545
1546 if (FLAG_trace_track_allocation_sites && allocation_mementos_found_ > 0) {
1547 PrintF("AllocationMementos found during scavenge = %d\n",
1548 allocation_mementos_found_);
1549 }
1550
1551 if (FLAG_allocation_site_pretenuring) {
1552 int pretenure_decisions_made = 0;
1553 Object* cur = allocation_sites_list();
1554 if (cur->IsAllocationSite()) {
1555 while (cur->IsAllocationSite()) {
1556 AllocationSite* casted = AllocationSite::cast(cur);
1557 if (!(casted->DecisionMade())) {
Hannes Payer (out of office) 2013/11/21 20:52:24 See the other comment about not enough allocation …
mvstanton 2013/11/22 11:18:51 I've simplified and encapsulated this code in a wa…
1558 casted->Decide();
1559 if (casted->GetPretenureMode() == TENURED) {
1560 pretenure_decisions_made++;
1561 }
1562 }
1563 casted->Clear();
1564 cur = casted->weak_next();
1565 }
1566 }
Hannes Payer (out of office) 2013/11/21 20:52:24 Right now we do not support switching state which …
mvstanton 2013/11/22 11:18:51 Done.
1567
1568 if (FLAG_trace_track_allocation_sites && pretenure_decisions_made > 0) {
1569 PrintF("Scavenge: pretenure decisions made: %d\n",
1570 pretenure_decisions_made);
1571 }
1572 }
1573 }
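The review exchange above asks for this walk to be simplified and encapsulated. A minimal sketch of how the loop could be factored into a single Heap helper, using only the site accessors that appear in the hunk; the helper name ProcessPretenuringFeedback is hypothetical, not necessarily what the later patch set uses:

// Hypothetical helper; walks the weak, singly-linked allocation-site list
// headed by allocation_sites_list(). A non-AllocationSite value (e.g. the
// undefined sentinel) terminates the list.
void Heap::ProcessPretenuringFeedback() {
  if (!FLAG_allocation_site_pretenuring) return;
  int pretenure_decisions_made = 0;
  Object* cur = allocation_sites_list();
  while (cur->IsAllocationSite()) {
    AllocationSite* site = AllocationSite::cast(cur);
    // Decide at most once per site; sites that have already decided are
    // left alone.
    if (!site->DecisionMade()) {
      site->Decide();
      if (site->GetPretenureMode() == TENURED) pretenure_decisions_made++;
    }
    site->Clear();  // Reset the per-scavenge memento bookkeeping.
    cur = site->weak_next();
  }
  if (FLAG_trace_track_allocation_sites && pretenure_decisions_made > 0) {
    PrintF("Scavenge: pretenure decisions made: %d\n",
           pretenure_decisions_made);
  }
}

Note that the redundant outer if (cur->IsAllocationSite()) guard from the hunk above disappears; the while condition already covers the empty-list case.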
1574
1575
1576 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1577 Object** p) {
1578 MapWord first_word = HeapObject::cast(*p)->map_word();
1579
1580 if (!first_word.IsForwardingAddress()) {
1581 // Unreachable external string can be finalized.
1582 heap->FinalizeExternalString(String::cast(*p));
(...skipping 2809 matching lines...)
4392 Object* result;
4393 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4394 if (!maybe_result->ToObject(&result)) return maybe_result;
4395 // No need for write barrier since object is white and map is in old space.
4396 HeapObject::cast(result)->set_map_no_write_barrier(map);
4397 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4398 reinterpret_cast<Address>(result) + map->instance_size());
4399 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4400 ASSERT(allocation_site->map() == allocation_site_map());
4401 alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
4402 if (FLAG_allocation_site_pretenuring) {
4403 allocation_site->IncrementMementoCreateCount();
Hannes Payer (out of office) 2013/11/21 20:52:24 I am wondering if this counter is not only interes…
mvstanton 2013/11/22 11:18:51 Maybe so, but not yet. I like the idea of keeping …
4404 }
4405 return result;
4406 }
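The exchange above leaves the counter's eventual consumers open; the call itself is plain bookkeeping. A sketch of what the increment plausibly does, assuming the count is stored as a Smi field on AllocationSite (the field name and accessors here are hypothetical; the real layout comes from the companion CL mentioned in the patch set title):

void AllocationSite::IncrementMementoCreateCount() {
  // Read the current count out of the (hypothetical) Smi field...
  int count = Smi::cast(memento_create_count())->value();
  // ...and store it back incremented. Smis are immediate values, so the
  // store needs no write barrier.
  set_memento_create_count(Smi::FromInt(count + 1), SKIP_WRITE_BARRIER);
}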
4407
4408
4409 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4410 ASSERT(gc_state_ == NOT_IN_GC);
4411 ASSERT(map->instance_type() != MAP_TYPE);
4412 // If allocation failures are disallowed, we may allocate in a different
4413 // space when new space is full and the object is not a large object.
4414 AllocationSpace retry_space =
(...skipping 412 matching lines...)
4827 MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
4828 // Never used to copy functions. If functions need to be copied we
4829 // have to be careful to clear the literals array.
4830 SLOW_ASSERT(!source->IsJSFunction());
4831
4832 // Make the clone.
4833 Map* map = source->map();
4834 int object_size = map->instance_size();
4835 Object* clone;
4836
4837 ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
4838
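The previous form of this assert additionally required map->instance_type() == JS_ARRAY_TYPE; the rewritten assert delegates the whole check to AllocationSite::CanTrack, so the set of trackable instance types is defined in one place. A sketch of such a predicate, illustrative only since the CL's actual definition may admit more types:

class AllocationSite : public Struct {
 public:
  // True if an AllocationMemento may be placed behind newly allocated
  // objects of this instance type. Sketch: per the old assert, the set is
  // effectively just JS_ARRAY_TYPE at the time of this CL.
  static bool CanTrack(InstanceType type) {
    return type == JS_ARRAY_TYPE;
  }
  // ...
};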
4839 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4840
4841 // If we're forced to always allocate, we use the general allocation
4842 // functions which may leave us with an object in old space.
4843 if (always_allocate()) {
4844 { MaybeObject* maybe_clone =
4845 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4846 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4847 }
(...skipping 21 matching lines...)
4869 CopyBlock(HeapObject::cast(clone)->address(),
4870 source->address(),
4871 object_size);
4872
4873 if (site != NULL) {
4874 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4875 reinterpret_cast<Address>(clone) + object_size);
4876 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4877 ASSERT(site->map() == allocation_site_map());
4878 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
4879 if (FLAG_allocation_site_pretenuring) {
4880 site->IncrementMementoCreateCount();
4881 }
4882 HeapProfiler* profiler = isolate()->heap_profiler();
4883 if (profiler->is_tracking_allocations()) {
4884 profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
4885 object_size);
4886 profiler->NewObjectEvent(alloc_memento->address(),
4887 AllocationMemento::kSize);
4888 }
4889 }
4890 }
4891
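As in the raw-allocation path earlier, the memento written at line 4874 above is not a separate heap allocation: when site != NULL the clone's block is presumably sized to object_size + AllocationMemento::kSize in the allocation code skipped above, and the memento is carved out of the tail of that block. The layout arithmetic, in sketch form:

// Memory layout after CopyJSObject with a non-NULL site (sketch):
//
//   clone_address                clone_address + object_size
//   |                            |
//   v                            v
//   +----------------------------+----------------------------+
//   |        JSObject clone      |      AllocationMemento     |
//   |        (object_size)       | (AllocationMemento::kSize) |
//   +----------------------------+----------------------------+
//
Address clone_address = HeapObject::cast(clone)->address();
AllocationMemento* memento = reinterpret_cast<AllocationMemento*>(
    clone_address + object_size);  // memento starts right after the clone

This is what lets the scavenger find a site's feedback cheaply: given a new-space object, the candidate memento is always at a fixed offset behind it.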
(...skipping 3091 matching lines...)
7983 static_cast<int>(object_sizes_last_time_[index]));
7984 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
7985 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7986
7987 OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7988 OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7989 ClearObjectStats();
7990 }
7991
7992 } }  // namespace v8::internal
