OLD | NEW |
1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/scavenger.h" | 5 #include "vm/scavenger.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <map> | 8 #include <map> |
9 #include <utility> | 9 #include <utility> |
10 | 10 |
(...skipping 154 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
165 delayed_weak_stack_.Add(it->second); | 165 delayed_weak_stack_.Add(it->second); |
166 } | 166 } |
167 delay_set_.erase(ret.first, ret.second); | 167 delay_set_.erase(ret.first, ret.second); |
168 } | 168 } |
169 intptr_t size = raw_obj->Size(); | 169 intptr_t size = raw_obj->Size(); |
170 // Check whether object should be promoted. | 170 // Check whether object should be promoted. |
171 if (scavenger_->survivor_end_ <= raw_addr) { | 171 if (scavenger_->survivor_end_ <= raw_addr) { |
172 // Not a survivor of a previous scavenge. Just copy the object into the | 172 // Not a survivor of a previous scavenge. Just copy the object into the |
173 // to space. | 173 // to space. |
174 new_addr = scavenger_->TryAllocate(size); | 174 new_addr = scavenger_->TryAllocate(size); |
| 175 if (HeapTrace::is_enabled()) { |
| 176 heap_->trace()->TraceCopy(raw_addr, new_addr); |
| 177 } |
175 } else { | 178 } else { |
176 // TODO(iposva): Experiment with less aggressive promotion. For example | 179 // TODO(iposva): Experiment with less aggressive promotion. For example |
177 // a coin toss determines if an object is promoted or whether it should | 180 // a coin toss determines if an object is promoted or whether it should |
178 // survive in this generation. | 181 // survive in this generation. |
179 // | 182 // |
180 // This object is a survivor of a previous scavenge. Attempt to promote | 183 // This object is a survivor of a previous scavenge. Attempt to promote |
181 // the object. | 184 // the object. |
182 new_addr = heap_->TryAllocate(size, Heap::kOld, growth_policy_); | 185 new_addr = heap_->TryAllocate(size, Heap::kOld, growth_policy_); |
183 if (new_addr != 0) { | 186 if (new_addr != 0) { |
184 // If promotion succeeded then we need to remember it so that it can | 187 // If promotion succeeded then we need to remember it so that it can |
185 // be traversed later. | 188 // be traversed later. |
186 scavenger_->PushToPromotedStack(new_addr); | 189 scavenger_->PushToPromotedStack(new_addr); |
187 bytes_promoted_ += size; | 190 bytes_promoted_ += size; |
| 191 if (HeapTrace::is_enabled()) { |
| 192 heap_->trace()->TracePromotion(raw_addr, new_addr); |
| 193 } |
188 } else if (!scavenger_->had_promotion_failure_) { | 194 } else if (!scavenger_->had_promotion_failure_) { |
189 // Signal a promotion failure and set the growth policy for | 195 // Signal a promotion failure and set the growth policy for |
190 // this, and all subsequent promotion allocations, to force | 196 // this, and all subsequent promotion allocations, to force |
191 // growth. | 197 // growth. |
192 scavenger_->had_promotion_failure_ = true; | 198 scavenger_->had_promotion_failure_ = true; |
193 growth_policy_ = PageSpace::kForceGrowth; | 199 growth_policy_ = PageSpace::kForceGrowth; |
194 new_addr = heap_->TryAllocate(size, Heap::kOld, growth_policy_); | 200 new_addr = heap_->TryAllocate(size, Heap::kOld, growth_policy_); |
195 if (new_addr != 0) { | 201 if (new_addr != 0) { |
196 scavenger_->PushToPromotedStack(new_addr); | 202 scavenger_->PushToPromotedStack(new_addr); |
197 bytes_promoted_ += size; | 203 bytes_promoted_ += size; |
| 204 if (HeapTrace::is_enabled()) { |
| 205 heap_->trace()->TracePromotion(raw_addr, new_addr); |
| 206 } |
198 } else { | 207 } else { |
199 // Promotion did not succeed. Copy into the to space | 208 // Promotion did not succeed. Copy into the to space |
200 // instead. | 209 // instead. |
201 new_addr = scavenger_->TryAllocate(size); | 210 new_addr = scavenger_->TryAllocate(size); |
| 211 if (HeapTrace::is_enabled()) { |
| 212 heap_->trace()->TraceCopy(raw_addr, new_addr); |
| 213 } |
202 } | 214 } |
203 } else { | 215 } else { |
204 ASSERT(growth_policy_ == PageSpace::kForceGrowth); | 216 ASSERT(growth_policy_ == PageSpace::kForceGrowth); |
205 // Promotion did not succeed. Copy into the to space instead. | 217 // Promotion did not succeed. Copy into the to space instead. |
206 new_addr = scavenger_->TryAllocate(size); | 218 new_addr = scavenger_->TryAllocate(size); |
| 219 if (HeapTrace::is_enabled()) { |
| 220 heap_->trace()->TraceCopy(raw_addr, new_addr); |
| 221 } |
207 } | 222 } |
208 } | 223 } |
209 // During a scavenge we always succeed to at least copy all of the | 224 // During a scavenge we always succeed to at least copy all of the |
210 // current objects to the to space. | 225 // current objects to the to space. |
211 ASSERT(new_addr != 0); | 226 ASSERT(new_addr != 0); |
212 // Copy the object to the new location. | 227 // Copy the object to the new location. |
213 memmove(reinterpret_cast<void*>(new_addr), | 228 memmove(reinterpret_cast<void*>(new_addr), |
214 reinterpret_cast<void*>(raw_addr), | 229 reinterpret_cast<void*>(raw_addr), |
215 size); | 230 size); |
216 // Remember forwarding address. | 231 // Remember forwarding address. |
(...skipping 411 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
628 | 643 |
629 if (FLAG_verify_before_gc) { | 644 if (FLAG_verify_before_gc) { |
630 OS::PrintErr("Verifying before Scavenge..."); | 645 OS::PrintErr("Verifying before Scavenge..."); |
631 heap_->Verify(); | 646 heap_->Verify(); |
632 OS::PrintErr(" done.\n"); | 647 OS::PrintErr(" done.\n"); |
633 } | 648 } |
634 | 649 |
635 if (FLAG_verbose_gc) { | 650 if (FLAG_verbose_gc) { |
636 OS::PrintErr("Start scavenge for %s collection\n", gc_reason); | 651 OS::PrintErr("Start scavenge for %s collection\n", gc_reason); |
637 } | 652 } |
| 653 uword prev_first_obj_start = FirstObjectStart(); |
| 654 uword prev_top_addr = *(TopAddress()); |
638 Timer timer(FLAG_verbose_gc, "Scavenge"); | 655 Timer timer(FLAG_verbose_gc, "Scavenge"); |
639 timer.Start(); | 656 timer.Start(); |
640 | 657 |
641 intptr_t in_use_before = in_use(); | 658 intptr_t in_use_before = in_use(); |
642 | 659 |
643 // Setup the visitor and run a scavenge. | 660 // Setup the visitor and run a scavenge. |
644 ScavengerVisitor visitor(isolate, this); | 661 ScavengerVisitor visitor(isolate, this); |
645 Prologue(isolate, invoke_api_callbacks); | 662 Prologue(isolate, invoke_api_callbacks); |
646 IterateRoots(isolate, &visitor, !invoke_api_callbacks); | 663 IterateRoots(isolate, &visitor, !invoke_api_callbacks); |
647 ProcessToSpace(&visitor); | 664 ProcessToSpace(&visitor); |
(...skipping 16 matching lines...) Expand all Loading... |
664 (capacity() + KB2) / KB, | 681 (capacity() + KB2) / KB, |
665 (visitor.bytes_promoted() + KB2) / KB); | 682 (visitor.bytes_promoted() + KB2) / KB); |
666 } | 683 } |
667 | 684 |
668 if (FLAG_verify_after_gc) { | 685 if (FLAG_verify_after_gc) { |
669 OS::PrintErr("Verifying after Scavenge..."); | 686 OS::PrintErr("Verifying after Scavenge..."); |
670 heap_->Verify(); | 687 heap_->Verify(); |
671 OS::PrintErr(" done.\n"); | 688 OS::PrintErr(" done.\n"); |
672 } | 689 } |
673 | 690 |
| 691 if (HeapTrace::is_enabled()) { |
| 692 heap_->trace()->TraceDeathRange(prev_first_obj_start, prev_top_addr); |
| 693 } |
| 694 |
674 count_++; | 695 count_++; |
675 // Done scavenging. Reset the marker. | 696 // Done scavenging. Reset the marker. |
676 ASSERT(scavenging_); | 697 ASSERT(scavenging_); |
677 scavenging_ = false; | 698 scavenging_ = false; |
678 } | 699 } |
679 | 700 |
680 | 701 |
681 void Scavenger::WriteProtect(bool read_only) { | 702 void Scavenger::WriteProtect(bool read_only) { |
682 space_->Protect( | 703 space_->Protect( |
683 read_only ? VirtualMemory::kReadOnly : VirtualMemory::kReadWrite); | 704 read_only ? VirtualMemory::kReadOnly : VirtualMemory::kReadWrite); |
(...skipping 13 matching lines...) Expand all Loading... |
697 PeerTable::iterator it = peer_table_.find(raw_obj); | 718 PeerTable::iterator it = peer_table_.find(raw_obj); |
698 return (it == peer_table_.end()) ? NULL : it->second; | 719 return (it == peer_table_.end()) ? NULL : it->second; |
699 } | 720 } |
700 | 721 |
701 | 722 |
// NOTE(review): side-by-side review-diff rendering (OLD | NEW); the leading
// numbers are review-tool line indices, not part of the C++ source itself.
// PeerCount: returns the number of entries in peer_table_, widened to
// int64_t for the caller-facing count API.
702 int64_t Scavenger::PeerCount() const { | 723 int64_t Scavenger::PeerCount() const { |
703   return static_cast<int64_t>(peer_table_.size()); | 724   return static_cast<int64_t>(peer_table_.size()); |
704 } | 725 } |
705 | 726 |
706 } // namespace dart | 727 } // namespace dart |
OLD | NEW |