OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
6 | 6 |
7 #include <math.h> | 7 #include <math.h> |
8 #include <stdlib.h> | 8 #include <stdlib.h> |
9 | 9 |
10 #include "base/format_macros.h" | 10 #include "base/format_macros.h" |
11 #include "base/memory/scoped_ptr.h" | |
12 #include "base/process_util.h" | |
11 #include "base/profiler/alternate_timer.h" | 13 #include "base/profiler/alternate_timer.h" |
12 #include "base/stringprintf.h" | 14 #include "base/stringprintf.h" |
13 #include "base/third_party/valgrind/memcheck.h" | 15 #include "base/third_party/valgrind/memcheck.h" |
14 #include "base/threading/thread_restrictions.h" | 16 #include "base/threading/thread_restrictions.h" |
15 #include "build/build_config.h" | |
16 #include "base/port.h" | 17 #include "base/port.h" |
17 | 18 |
18 using base::TimeDelta; | 19 using base::TimeDelta; |
19 | 20 |
20 namespace tracked_objects { | 21 namespace tracked_objects { |
21 | 22 |
22 namespace { | 23 namespace { |
23 | 24 |
24 // Flag to compile out almost all of the task tracking code. | 25 // Flag to compile out almost all of the task tracking code. |
25 const bool kTrackAllTaskObjects = true; | 26 const bool kTrackAllTaskObjects = true; |
(...skipping 11 matching lines...) | |
37 ThreadData::PROFILING_CHILDREN_ACTIVE; | 38 ThreadData::PROFILING_CHILDREN_ACTIVE; |
38 | 39 |
39 // Control whether an alternate time source (Now() function) is supported by | 40 // Control whether an alternate time source (Now() function) is supported by |
40 // the ThreadData class. This compile time flag should be set to true if we | 41 // the ThreadData class. This compile time flag should be set to true if we |
41 // want other modules (such as a memory allocator, or a thread-specific CPU time | 42 // want other modules (such as a memory allocator, or a thread-specific CPU time |
42 // clock) to be able to provide a thread-specific Now() function. Without this | 43 // clock) to be able to provide a thread-specific Now() function. Without this |
43 // compile-time flag, the code will only support the wall-clock time. This flag | 44 // compile-time flag, the code will only support the wall-clock time. This flag |
44 // can be flipped to efficiently disable this path (if there is a performance | 45 // can be flipped to efficiently disable this path (if there is a performance |
45 // problem with its presence). | 46 // problem with its presence). |
46 static const bool kAllowAlternateTimeSourceHandling = true; | 47 static const bool kAllowAlternateTimeSourceHandling = true; |
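The comment above describes a pluggable, thread-specific Now() source. As a rough illustration of that pattern only — the names below are invented for the example; the real hook is the one declared in base/profiler/alternate_timer.h and its exact signature is not shown in this CL:

```cpp
// Illustrative sketch of a pluggable Now() source; these names are NOT the
// tracked_objects API, just the general shape of the mechanism described above.
typedef unsigned int NowFunction();  // Returns "now" in milliseconds.

static NowFunction* g_alternate_now_function = NULL;  // NULL => wall clock.

unsigned int CurrentTimeMillis() {
  if (g_alternate_now_function)
    return (*g_alternate_now_function)();  // e.g. a per-thread CPU/alloc clock.
  return 0;  // Placeholder for the default wall-clock path.
}
```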
48 | |
47 } // namespace | 49 } // namespace |
48 | 50 |
49 //------------------------------------------------------------------------------ | 51 //------------------------------------------------------------------------------ |
50 // DeathData tallies durations when a death takes place. | 52 // DeathData tallies durations when a death takes place. |
51 | 53 |
52 DeathData::DeathData() { | 54 DeathData::DeathData() { |
53 Clear(); | 55 Clear(); |
54 } | 56 } |
55 | 57 |
56 DeathData::DeathData(int count) { | 58 DeathData::DeathData(int count) { |
(...skipping 49 matching lines...) | |
106 } | 108 } |
107 | 109 |
108 DurationInt DeathData::queue_duration_max() const { | 110 DurationInt DeathData::queue_duration_max() const { |
109 return queue_duration_max_; | 111 return queue_duration_max_; |
110 } | 112 } |
111 | 113 |
112 DurationInt DeathData::queue_duration_sample() const { | 114 DurationInt DeathData::queue_duration_sample() const { |
113 return queue_duration_sample_; | 115 return queue_duration_sample_; |
114 } | 116 } |
115 | 117 |
116 | |
117 base::DictionaryValue* DeathData::ToValue() const { | |
118 base::DictionaryValue* dictionary = new base::DictionaryValue; | |
119 dictionary->Set("count", base::Value::CreateIntegerValue(count_)); | |
120 dictionary->Set("run_ms", | |
121 base::Value::CreateIntegerValue(run_duration_sum())); | |
122 dictionary->Set("run_ms_max", | |
123 base::Value::CreateIntegerValue(run_duration_max())); | |
124 dictionary->Set("run_ms_sample", | |
125 base::Value::CreateIntegerValue(run_duration_sample())); | |
126 dictionary->Set("queue_ms", | |
127 base::Value::CreateIntegerValue(queue_duration_sum())); | |
128 dictionary->Set("queue_ms_max", | |
129 base::Value::CreateIntegerValue(queue_duration_max())); | |
130 dictionary->Set("queue_ms_sample", | |
131 base::Value::CreateIntegerValue(queue_duration_sample())); | |
132 return dictionary; | |
133 } | |
134 | |
135 void DeathData::ResetMax() { | 118 void DeathData::ResetMax() { |
136 run_duration_max_ = 0; | 119 run_duration_max_ = 0; |
137 queue_duration_max_ = 0; | 120 queue_duration_max_ = 0; |
138 } | 121 } |
139 | 122 |
140 void DeathData::Clear() { | 123 void DeathData::Clear() { |
141 count_ = 0; | 124 count_ = 0; |
142 run_duration_sum_ = 0; | 125 run_duration_sum_ = 0; |
143 run_duration_max_ = 0; | 126 run_duration_max_ = 0; |
144 run_duration_sample_ = 0; | 127 run_duration_sample_ = 0; |
145 queue_duration_sum_ = 0; | 128 queue_duration_sum_ = 0; |
146 queue_duration_max_ = 0; | 129 queue_duration_max_ = 0; |
147 queue_duration_sample_ = 0; | 130 queue_duration_sample_ = 0; |
148 } | 131 } |
149 | 132 |
150 //------------------------------------------------------------------------------ | 133 //------------------------------------------------------------------------------ |
134 SerializedDeathData::SerializedDeathData() | |
135 : count(-1), | |
136 run_duration_sum(-1), | |
137 run_duration_max(-1), | |
138 run_duration_sample(-1), | |
139 queue_duration_sum(-1), | |
140 queue_duration_max(-1), | |
141 queue_duration_sample(-1) { | |
142 } | |
143 | |
144 SerializedDeathData::SerializedDeathData( | |
145 const tracked_objects::DeathData& death_data) | |
146 : count(death_data.count()), | |
147 run_duration_sum(death_data.run_duration_sum()), | |
148 run_duration_max(death_data.run_duration_max()), | |
149 run_duration_sample(death_data.run_duration_sample()), | |
150 queue_duration_sum(death_data.queue_duration_sum()), | |
151 queue_duration_max(death_data.queue_duration_max()), | |
152 queue_duration_sample(death_data.queue_duration_sample()) { | |
153 } | |
154 | |
155 SerializedDeathData::~SerializedDeathData() { | |
156 } | |
157 | |
158 //------------------------------------------------------------------------------ | |
151 BirthOnThread::BirthOnThread(const Location& location, | 159 BirthOnThread::BirthOnThread(const Location& location, |
152 const ThreadData& current) | 160 const ThreadData& current) |
153 : location_(location), | 161 : location_(location), |
154 birth_thread_(&current) { | 162 birth_thread_(&current) { |
155 } | 163 } |
156 | 164 |
157 const Location BirthOnThread::location() const { return location_; } | 165 //------------------------------------------------------------------------------ |
158 const ThreadData* BirthOnThread::birth_thread() const { return birth_thread_; } | 166 SerializedBirthOnThread::SerializedBirthOnThread() { |
167 } | |
159 | 168 |
160 void BirthOnThread::ToValue(const std::string& prefix, | 169 SerializedBirthOnThread::SerializedBirthOnThread( |
161 base::DictionaryValue* dictionary) const { | 170 const tracked_objects::BirthOnThread& birth) |
162 dictionary->Set(prefix + "_location", location_.ToValue()); | 171 : location(birth.location()), |
163 dictionary->Set(prefix + "_thread", | 172 thread_name(birth.birth_thread()->thread_name()) { |
164 base::Value::CreateStringValue(birth_thread_->thread_name())); | 173 } |
174 | |
175 SerializedBirthOnThread::~SerializedBirthOnThread() { | |
165 } | 176 } |
166 | 177 |
167 //------------------------------------------------------------------------------ | 178 //------------------------------------------------------------------------------ |
168 Births::Births(const Location& location, const ThreadData& current) | 179 Births::Births(const Location& location, const ThreadData& current) |
169 : BirthOnThread(location, current), | 180 : BirthOnThread(location, current), |
170 birth_count_(1) { } | 181 birth_count_(1) { } |
171 | 182 |
172 int Births::birth_count() const { return birth_count_; } | 183 int Births::birth_count() const { return birth_count_; } |
173 | 184 |
174 void Births::RecordBirth() { ++birth_count_; } | 185 void Births::RecordBirth() { ++birth_count_; } |
(...skipping 152 matching lines...) | |
327 return; | 338 return; |
328 } | 339 } |
329 // We must NOT do any allocations during this callback. | 340 // We must NOT do any allocations during this callback. |
330 // Using the simple linked lists avoids all allocations. | 341 // Using the simple linked lists avoids all allocations. |
331 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); | 342 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); |
332 this->next_retired_worker_ = first_retired_worker_; | 343 this->next_retired_worker_ = first_retired_worker_; |
333 first_retired_worker_ = this; | 344 first_retired_worker_ = this; |
334 } | 345 } |
335 | 346 |
336 // static | 347 // static |
337 base::DictionaryValue* ThreadData::ToValue(bool reset_max) { | 348 void ThreadData::ToSerializedProcessData(bool reset_max, |
338 DataCollector collected_data; // Gather data. | 349 SerializedProcessData* process_data) { |
339 // Request multiple calls to collected_data.Append() for all threads. | 350 // Add births that have run to completion to |process_data|. |
340 SendAllMaps(reset_max, &collected_data); | 351 // |birth_counts| tracks the total number of births recorded at each location |
341 collected_data.AddListOfLivingObjects(); // Add births that are still alive. | 352 // for which we have not seen a death count. |
342 base::DictionaryValue* dictionary = new base::DictionaryValue(); | 353 std::map<const BirthOnThread*, int> birth_counts; |
343 collected_data.ToValue(dictionary); | 354 ThreadData::SerializeAllExecutedTasks(reset_max, process_data, &birth_counts); |
344 return dictionary; | 355 |
356 // Add births that are still active -- i.e. objects that have tallied a birth, | |
357 // but have not yet tallied a matching death, and hence must be either | |
358 // running, queued up, or being held in limbo for future posting. | |
359 for (std::map<const BirthOnThread*, int>::const_iterator it = | |
360 birth_counts.begin(); | |
361 it != birth_counts.end(); ++it) { | |
362 if (it->second > 0) { | |
363 process_data->snapshots.push_back( | |
364 SerializedSnapshot(*it->first, DeathData(it->second), "Still_Alive")); | |
365 } | |
366 } | |
345 } | 367 } |
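A hypothetical call site for the new entry point, built only from the signatures and fields visible in this file (the function and struct names are real per this CL; where Chromium actually invokes it is not shown here):

```cpp
#include "base/tracked_objects.h"

void DumpTaskProfileSketch() {
  tracked_objects::SerializedProcessData process_data;
  // Pass true for |reset_max| to also clear the recorded run/queue maxima
  // while snapshotting.
  tracked_objects::ThreadData::ToSerializedProcessData(false, &process_data);
  // process_data.snapshots now holds one SerializedSnapshot per tracked
  // (birth, death-thread) pair, plus "Still_Alive" entries for births that
  // have not yet been matched by a death.
}
```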
346 | 368 |
347 Births* ThreadData::TallyABirth(const Location& location) { | 369 Births* ThreadData::TallyABirth(const Location& location) { |
348 BirthMap::iterator it = birth_map_.find(location); | 370 BirthMap::iterator it = birth_map_.find(location); |
349 Births* child; | 371 Births* child; |
350 if (it != birth_map_.end()) { | 372 if (it != birth_map_.end()) { |
351 child = it->second; | 373 child = it->second; |
352 child->RecordBirth(); | 374 child->RecordBirth(); |
353 } else { | 375 } else { |
354 child = new Births(location, *this); // Leak this. | 376 child = new Births(location, *this); // Leak this. |
(...skipping 165 matching lines...) | |
520 | 542 |
521 DurationInt queue_duration = 0; | 543 DurationInt queue_duration = 0; |
522 DurationInt run_duration = 0; | 544 DurationInt run_duration = 0; |
523 if (!start_of_run.is_null() && !end_of_run.is_null()) | 545 if (!start_of_run.is_null() && !end_of_run.is_null()) |
524 run_duration = (end_of_run - start_of_run).InMilliseconds(); | 546 run_duration = (end_of_run - start_of_run).InMilliseconds(); |
525 current_thread_data->TallyADeath(*birth, queue_duration, run_duration); | 547 current_thread_data->TallyADeath(*birth, queue_duration, run_duration); |
526 } | 548 } |
527 | 549 |
528 const std::string ThreadData::thread_name() const { return thread_name_; } | 550 const std::string ThreadData::thread_name() const { return thread_name_; } |
529 | 551 |
552 // static | |
553 void ThreadData::SerializeAllExecutedTasks( | |
554 bool reset_max, | |
555 SerializedProcessData* process_data, | |
556 std::map<const BirthOnThread*, int>* birth_counts) { | |
557 if (!kTrackAllTaskObjects) | |
558 return; // Not compiled in. | |
559 | |
560 // Get an unchanging copy of a ThreadData list. | |
561 ThreadData* my_list = ThreadData::first(); | |
562 | |
563 // Gather data serially. | |
564 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
565 // grabbing values without the protection of a lock, but it has the advantage | |
566 // of working even with threads that don't have message loops. If a user | |
567 // sees any strangeness, they can always just run their stats gathering a | |
568 // second time. | |
569 for (ThreadData* thread_data = my_list; | |
570 thread_data; | |
571 thread_data = thread_data->next()) { | |
572 thread_data->SerializeExecutedTasks(reset_max, process_data, birth_counts); | |
573 } | |
574 } | |
575 | |
576 void ThreadData::SerializeExecutedTasks( | |
577 bool reset_max, | |
578 SerializedProcessData* process_data, | |
579 std::map<const BirthOnThread*, int>* birth_counts) { | |
580 // Get copy of data, so that the data will not change during the iterations | |
581 // and processing. | |
582 ThreadData::BirthMap birth_map; | |
583 ThreadData::DeathMap death_map; | |
584 ThreadData::ParentChildSet parent_child_set; | |
585 SnapshotMaps(reset_max, &birth_map, &death_map, &parent_child_set); | |
586 | |
587 for (ThreadData::DeathMap::const_iterator it = death_map.begin(); | |
588 it != death_map.end(); ++it) { | |
589 process_data->snapshots.push_back( | |
590 SerializedSnapshot(*it->first, it->second, thread_name())); | |
591 (*birth_counts)[it->first] -= it->first->birth_count(); | |
592 } | |
593 | |
594 for (ThreadData::BirthMap::const_iterator it = birth_map.begin(); | |
595 it != birth_map.end(); ++it) { | |
596 (*birth_counts)[it->second] += it->second->birth_count(); | |
597 } | |
598 | |
599 if (!kTrackParentChildLinks) | |
600 return; | |
601 | |
602 for (ThreadData::ParentChildSet::const_iterator it = parent_child_set.begin(); | |
603 it != parent_child_set.end(); ++it) { | |
604 process_data->descendants.push_back(SerializedParentChildPair(*it)); | |
605 } | |
606 } | |
607 | |
530 // This may be called from another thread. | 608 // This may be called from another thread. |
531 void ThreadData::SnapshotMaps(bool reset_max, | 609 void ThreadData::SnapshotMaps(bool reset_max, |
532 BirthMap* birth_map, | 610 BirthMap* birth_map, |
533 DeathMap* death_map, | 611 DeathMap* death_map, |
534 ParentChildSet* parent_child_set) { | 612 ParentChildSet* parent_child_set) { |
535 base::AutoLock lock(map_lock_); | 613 base::AutoLock lock(map_lock_); |
536 for (BirthMap::const_iterator it = birth_map_.begin(); | 614 for (BirthMap::const_iterator it = birth_map_.begin(); |
537 it != birth_map_.end(); ++it) | 615 it != birth_map_.end(); ++it) |
538 (*birth_map)[it->first] = it->second; | 616 (*birth_map)[it->first] = it->second; |
539 for (DeathMap::iterator it = death_map_.begin(); | 617 for (DeathMap::iterator it = death_map_.begin(); |
540 it != death_map_.end(); ++it) { | 618 it != death_map_.end(); ++it) { |
541 (*death_map)[it->first] = it->second; | 619 (*death_map)[it->first] = it->second; |
542 if (reset_max) | 620 if (reset_max) |
543 it->second.ResetMax(); | 621 it->second.ResetMax(); |
544 } | 622 } |
545 | 623 |
546 if (!kTrackParentChildLinks) | 624 if (!kTrackParentChildLinks) |
547 return; | 625 return; |
548 | 626 |
549 for (ParentChildSet::iterator it = parent_child_set_.begin(); | 627 for (ParentChildSet::iterator it = parent_child_set_.begin(); |
550 it != parent_child_set_.end(); ++it) | 628 it != parent_child_set_.end(); ++it) |
551 parent_child_set->insert(*it); | 629 parent_child_set->insert(*it); |
552 } | 630 } |
553 | 631 |
554 // static | 632 // static |
555 void ThreadData::SendAllMaps(bool reset_max, class DataCollector* target) { | |
556 if (!kTrackAllTaskObjects) | |
557 return; // Not compiled in. | |
558 // Get an unchanging copy of a ThreadData list. | |
559 ThreadData* my_list = ThreadData::first(); | |
560 | |
561 // Gather data serially. | |
562 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
563 // grabbing values without the protection of a lock, but it has the advantage | |
564 // of working even with threads that don't have message loops. If a user | |
565 // sees any strangeness, they can always just run their stats gathering a | |
566 // second time. | |
567 for (ThreadData* thread_data = my_list; | |
568 thread_data; | |
569 thread_data = thread_data->next()) { | |
570 // Get copy of data. | |
571 ThreadData::BirthMap birth_map; | |
572 ThreadData::DeathMap death_map; | |
573 ThreadData::ParentChildSet parent_child_set; | |
574 thread_data->SnapshotMaps(reset_max, &birth_map, &death_map, | |
575 &parent_child_set); | |
576 target->Append(*thread_data, birth_map, death_map, parent_child_set); | |
577 } | |
578 } | |
579 | |
580 // static | |
581 void ThreadData::ResetAllThreadData() { | 633 void ThreadData::ResetAllThreadData() { |
582 ThreadData* my_list = first(); | 634 ThreadData* my_list = first(); |
583 | 635 |
584 for (ThreadData* thread_data = my_list; | 636 for (ThreadData* thread_data = my_list; |
585 thread_data; | 637 thread_data; |
586 thread_data = thread_data->next()) | 638 thread_data = thread_data->next()) |
587 thread_data->Reset(); | 639 thread_data->Reset(); |
588 } | 640 } |
589 | 641 |
590 void ThreadData::Reset() { | 642 void ThreadData::Reset() { |
(...skipping 187 matching lines...) | |
778 thread_data_list = thread_data_list->next(); | 830 thread_data_list = thread_data_list->next(); |
779 | 831 |
780 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); | 832 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); |
781 next_thread_data->birth_map_.end() != it; ++it) | 833 next_thread_data->birth_map_.end() != it; ++it) |
782 delete it->second; // Delete the Birth Records. | 834 delete it->second; // Delete the Birth Records. |
783 delete next_thread_data; // Includes all Death Records. | 835 delete next_thread_data; // Includes all Death Records. |
784 } | 836 } |
785 } | 837 } |
786 | 838 |
787 //------------------------------------------------------------------------------ | 839 //------------------------------------------------------------------------------ |
788 // Individual 3-tuple of birth (place and thread) along with death thread, and | 840 SerializedSnapshot::SerializedSnapshot() { |
789 // the accumulated stats for instances (DeathData). | |
790 | |
791 Snapshot::Snapshot(const BirthOnThread& birth_on_thread, | |
792 const ThreadData& death_thread, | |
793 const DeathData& death_data) | |
794 : birth_(&birth_on_thread), | |
795 death_thread_(&death_thread), | |
796 death_data_(death_data) { | |
797 } | 841 } |
798 | 842 |
799 Snapshot::Snapshot(const BirthOnThread& birth_on_thread, int count) | 843 SerializedSnapshot::SerializedSnapshot(const BirthOnThread& birth, |
800 : birth_(&birth_on_thread), | 844 const DeathData& death_data, |
801 death_thread_(NULL), | 845 const std::string& death_thread_name) |
802 death_data_(DeathData(count)) { | 846 : birth(birth), |
847 death_data(death_data), | |
848 death_thread_name(death_thread_name) { | |
803 } | 849 } |
804 | 850 |
805 const std::string Snapshot::DeathThreadName() const { | 851 SerializedSnapshot::~SerializedSnapshot() { |
806 if (death_thread_) | |
807 return death_thread_->thread_name(); | |
808 return "Still_Alive"; | |
809 } | |
810 | |
811 base::DictionaryValue* Snapshot::ToValue() const { | |
812 base::DictionaryValue* dictionary = new base::DictionaryValue; | |
813 // TODO(jar): Switch the next two lines to: | |
814 // birth_->ToValue("birth", dictionary); | |
815 // ...but that will require fixing unit tests, and JS to take | |
816 // "birth_location" rather than "location" | |
817 dictionary->Set("birth_thread", | |
818 base::Value::CreateStringValue(birth_->birth_thread()->thread_name())); | |
819 dictionary->Set("location", birth_->location().ToValue()); | |
820 | |
821 dictionary->Set("death_data", death_data_.ToValue()); | |
822 dictionary->Set("death_thread", | |
823 base::Value::CreateStringValue(DeathThreadName())); | |
824 return dictionary; | |
825 } | 852 } |
826 | 853 |
827 //------------------------------------------------------------------------------ | 854 //------------------------------------------------------------------------------ |
828 // DataCollector | 855 // SerializedParentChildPair |
829 | 856 |
830 DataCollector::DataCollector() {} | 857 SerializedParentChildPair::SerializedParentChildPair(){ |
831 | |
832 DataCollector::~DataCollector() { | |
833 } | 858 } |
834 | 859 |
835 void DataCollector::Append(const ThreadData& thread_data, | 860 SerializedParentChildPair::SerializedParentChildPair( |
836 const ThreadData::BirthMap& birth_map, | 861 const ThreadData::ParentChildPair& parent_child) |
837 const ThreadData::DeathMap& death_map, | 862 : parent(*parent_child.first), |
jar (doing other things) 2012/04/04 17:55:35: nit: indent 4 from left margin
Ilya Sherman 2012/04/05 02:51:04: Done.
838 const ThreadData::ParentChildSet& parent_child_set) { | 863 child(*parent_child.second) { |
839 for (ThreadData::DeathMap::const_iterator it = death_map.begin(); | |
840 it != death_map.end(); ++it) { | |
841 collection_.push_back(Snapshot(*it->first, thread_data, it->second)); | |
842 global_birth_count_[it->first] -= it->first->birth_count(); | |
843 } | |
844 | |
845 for (ThreadData::BirthMap::const_iterator it = birth_map.begin(); | |
846 it != birth_map.end(); ++it) { | |
847 global_birth_count_[it->second] += it->second->birth_count(); | |
848 } | |
849 | |
850 if (!kTrackParentChildLinks) | |
851 return; | |
852 | |
853 for (ThreadData::ParentChildSet::const_iterator it = parent_child_set.begin(); | |
854 it != parent_child_set.end(); ++it) { | |
855 parent_child_set_.insert(*it); | |
856 } | |
857 } | 864 } |
858 | 865 |
859 DataCollector::Collection* DataCollector::collection() { | 866 SerializedParentChildPair::~SerializedParentChildPair() { |
860 return &collection_; | |
861 } | 867 } |
862 | 868 |
863 void DataCollector::AddListOfLivingObjects() { | 869 //------------------------------------------------------------------------------ |
864 for (BirthCount::iterator it = global_birth_count_.begin(); | 870 // SerializedProcessData |
865 it != global_birth_count_.end(); ++it) { | 871 |
866 if (it->second > 0) | 872 SerializedProcessData::SerializedProcessData() |
867 collection_.push_back(Snapshot(*it->first, it->second)); | 873 : process_id(base::GetCurrentProcId()) { |
868 } | |
869 } | 874 } |
870 | 875 |
871 void DataCollector::ToValue(base::DictionaryValue* dictionary) const { | 876 SerializedProcessData::~SerializedProcessData() { |
872 base::ListValue* list = new base::ListValue; | |
873 for (size_t i = 0; i < collection_.size(); ++i) { | |
874 list->Append(collection_[i].ToValue()); | |
875 } | |
876 dictionary->Set("list", list); | |
877 | |
878 base::ListValue* descendants = new base::ListValue; | |
879 for (ThreadData::ParentChildSet::const_iterator it = | |
880 parent_child_set_.begin(); | |
881 it != parent_child_set_.end(); | |
882 ++it) { | |
883 base::DictionaryValue* parent_child = new base::DictionaryValue; | |
884 it->first->ToValue("parent", parent_child); | |
885 it->second->ToValue("child", parent_child); | |
886 descendants->Append(parent_child); | |
887 } | |
888 dictionary->Set("descendants", descendants); | |
889 } | 877 } |
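For orientation, the serialized types used throughout this file appear to have roughly the following shape. This is an inferred sketch reconstructed from the usages above; the real declarations live in tracked_objects.h and may differ in detail, so it is not meant to compile alongside that header:

```cpp
// Inferred shapes only -- reconstructed from the usages in this file, not
// copied from tracked_objects.h.
struct SerializedBirthOnThread {
  Location location;        // From BirthOnThread::location().
  std::string thread_name;  // From the birth thread's name.
};

struct SerializedDeathData {
  int count;
  DurationInt run_duration_sum, run_duration_max, run_duration_sample;
  DurationInt queue_duration_sum, queue_duration_max, queue_duration_sample;
};

struct SerializedSnapshot {
  SerializedBirthOnThread birth;
  SerializedDeathData death_data;
  std::string death_thread_name;  // "Still_Alive" when no death is recorded.
};

struct SerializedParentChildPair {
  SerializedBirthOnThread parent, child;
};

struct SerializedProcessData {
  base::ProcessId process_id;  // Filled from base::GetCurrentProcId().
  std::vector<SerializedSnapshot> snapshots;
  std::vector<SerializedParentChildPair> descendants;
};
```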
890 | 878 |
891 } // namespace tracked_objects | 879 } // namespace tracked_objects |