OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // Histogram is an object that aggregates statistics, and can summarize them in | 5 // Histogram is an object that aggregates statistics, and can summarize them in |
6 // various forms, including ASCII graphical, HTML, and numerically (as a | 6 // various forms, including ASCII graphical, HTML, and numerically (as a |
7 // vector of numbers corresponding to each of the aggregating buckets). | 7 // vector of numbers corresponding to each of the aggregating buckets). |
8 // See header file for details and examples. | 8 // See header file for details and examples. |
9 | 9 |
10 #include "base/metrics/histogram.h" | 10 #include "base/metrics/histogram.h" |
(...skipping 13 matching lines...) | |
24 #include "base/synchronization/lock.h" | 24 #include "base/synchronization/lock.h" |
25 | 25 |
26 using std::string; | 26 using std::string; |
27 using std::vector; | 27 using std::vector; |
28 | 28 |
29 namespace base { | 29 namespace base { |
30 | 30 |
31 typedef HistogramBase::Count Count; | 31 typedef HistogramBase::Count Count; |
32 typedef HistogramBase::Sample Sample; | 32 typedef HistogramBase::Sample Sample; |
33 | 33 |
34 BucketHistogramSamples::BucketHistogramSamples( | |
35 const BucketRanges* bucket_ranges) | |
36 : counts_(bucket_ranges->size() - 1), | |
37 bucket_ranges_(bucket_ranges) { | |
38 CHECK_GE(bucket_ranges_->size(), 2u); | |
39 } | |
40 BucketHistogramSamples::~BucketHistogramSamples() {} | |
41 | |
42 void BucketHistogramSamples::Accumulate(Sample value, Count count) { | |
43 size_t bucket_index = GetBucketIndex(value, bucket_ranges_); | |
44 counts_[bucket_index] += count; | |
45 IncreaseSum(count * value); | |
46 IncreaseRedundantCount(count); | |
47 } | |
48 | |
49 Count BucketHistogramSamples::GetCount(Sample value) const { | |
50 size_t bucket_index = GetBucketIndex(value, bucket_ranges_); | |
51 return counts_[bucket_index]; | |
52 } | |
53 | |
54 Count BucketHistogramSamples::TotalCount() const { | |
55 Count count = 0; | |
56 for (size_t i = 0; i < counts_.size(); i++) { | |
57 count += counts_[i]; | |
58 } | |
59 return count; | |
60 } | |
61 | |
62 Count BucketHistogramSamples::GetCountFromBucketIndex( | |
63 size_t bucket_index) const { | |
64 DCHECK_LT(bucket_index, counts_.size()); | |
65 return counts_[bucket_index]; | |
66 } | |
67 | |
68 scoped_ptr<SampleCountIterator> BucketHistogramSamples::Iterator() const { | |
69 return scoped_ptr<SampleCountIterator>( | |
70 new BucketHistogramSamplesIterator(&counts_, bucket_ranges_)); | |
71 } | |
72 | |
73 bool BucketHistogramSamples::AddSubtractImpl(SampleCountIterator* iter, | |
74 bool is_add) { | |
75 HistogramBase::Sample min; | |
76 HistogramBase::Sample max; | |
77 HistogramBase::Count count; | |
78 | |
79 // Go through the iterator and add the counts into the correct buckets. | |
80 size_t index = 0; | |
81 while (index < counts_.size() && !iter->Done()) { | |
82 iter->Get(&min, &max, &count); | |
83 if (min == bucket_ranges_->range(index) && | |
84 max == bucket_ranges_->range(index + 1)) { | |
85 // Sample matches this bucket! | |
86 counts_[index] += is_add ? count : -count; | |
87 iter->Next(); | |
88 } else if (min > bucket_ranges_->range(index)) { | |
89 // Sample is larger than current bucket range. Try next. | |
90 index++; | |
91 } else { | |
92 // Sample is smaller than current bucket range. We scan buckets from | |
93 // smallest to largest, so the sample value must be run. | |
Ilya Sherman (2012/08/29 23:41:27): What does "must be run" mean?
kaiwang (2012/08/30 03:13:22): Typo; changed to "invalid".
| |
94 return false; | |
95 } | |
96 } | |
97 | |
98 return iter->Done(); | |
99 } | |
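The loop above walks the incoming iterator and the local counts_ array in lockstep, merging a sample only when its [min, max) boundaries line up exactly with the current bucket. A minimal standalone sketch of the same lockstep idea, using plain vectors and illustrative values that are not part of this CL:

#include <cassert>
#include <cstddef>
#include <vector>

struct BucketSample { int min; int max; int count; };

// Adds |samples| into |counts|, where counts[i] covers [ranges[i], ranges[i + 1]).
// Returns false if a sample's boundaries never line up with a bucket.
bool AddSamples(const std::vector<int>& ranges,
                const std::vector<BucketSample>& samples,
                std::vector<int>* counts) {
  size_t index = 0;
  size_t sample_index = 0;
  while (index < counts->size() && sample_index < samples.size()) {
    const BucketSample& s = samples[sample_index];
    if (s.min == ranges[index] && s.max == ranges[index + 1]) {
      (*counts)[index] += s.count;  // Boundaries match: merge this bucket.
      ++sample_index;
    } else if (s.min > ranges[index]) {
      ++index;                      // Sample belongs to a later bucket.
    } else {
      return false;                 // Boundaries can no longer match.
    }
  }
  return sample_index == samples.size();
}

int main() {
  std::vector<int> ranges = {0, 1, 3, 7};
  std::vector<int> counts = {2, 0, 5};
  std::vector<BucketSample> samples = {{1, 3, 4}, {3, 7, 1}};
  assert(AddSamples(ranges, samples, &counts));
  assert(counts[1] == 4 && counts[2] == 6);
  return 0;
}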
100 | |
101 // Use simple binary search. This is very general, but there are better | |
102 // approaches if we knew that the buckets were linearly distributed. | |
103 // | |
104 // static | |
105 size_t BucketHistogramSamples::GetBucketIndex( | |
106 Sample value, const BucketRanges* bucket_ranges) { | |
107 size_t bucket_count = bucket_ranges->size() - 1; | |
108 CHECK_GE(bucket_count, 1u); | |
109 CHECK_GE(value, bucket_ranges->range(0)); | |
110 CHECK_LT(value, bucket_ranges->range(bucket_count)); | |
111 | |
112 size_t under = 0; | |
113 size_t over = bucket_count; | |
114 size_t mid; | |
115 do { | |
116 mid = under + (over - under)/2; | |
117 if (mid == under) | |
118 break; | |
119 if (bucket_ranges->range(mid) <= value) | |
120 under = mid; | |
121 else | |
122 over = mid; | |
123 } while (true); | |
124 return mid; | |
125 } | |
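As the comment above says, the bisection is the fully general fallback; linearly spaced buckets could instead compute the index directly as (value - range(0)) / bucket_width. A self-contained sketch of the same bisection over a plain ranges vector, with made-up sample boundaries for illustration:

#include <cassert>
#include <cstddef>
#include <vector>

// Returns i such that ranges[i] <= value < ranges[i + 1]. The caller must
// guarantee ranges[0] <= value < ranges.back(), as GetBucketIndex() does
// with its CHECKs.
size_t BinarySearchBucket(const std::vector<int>& ranges, int value) {
  size_t under = 0;
  size_t over = ranges.size() - 1;
  while (true) {
    size_t mid = under + (over - under) / 2;
    if (mid == under)
      return mid;
    if (ranges[mid] <= value)
      under = mid;
    else
      over = mid;
  }
}

int main() {
  std::vector<int> ranges = {0, 1, 3, 7, 15, 31};  // Roughly exponential buckets.
  assert(BinarySearchBucket(ranges, 0) == 0);
  assert(BinarySearchBucket(ranges, 6) == 2);
  assert(BinarySearchBucket(ranges, 30) == 4);
  return 0;
}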
126 | |
127 BucketHistogramSamplesIterator::BucketHistogramSamplesIterator( | |
128 const vector<Count>* counts, | |
129 const BucketRanges* bucket_ranges) | |
130 : counts_(counts), | |
131 bucket_ranges_(bucket_ranges), | |
132 index_(-1), | |
133 is_done_(false) { | |
134 CHECK_GT(bucket_ranges_->size(), counts_->size()); | |
135 ForwardToNextNonemptyBucket(); | |
136 } | |
137 | |
138 bool BucketHistogramSamplesIterator::Done() const { | |
139 return is_done_; | |
140 } | |
141 | |
142 void BucketHistogramSamplesIterator::Next() { | |
143 ForwardToNextNonemptyBucket(); | |
144 } | |
145 | |
146 void BucketHistogramSamplesIterator::Get(HistogramBase::Sample* min, | |
147 HistogramBase::Sample* max, | |
148 HistogramBase::Count* count) const { | |
149 CHECK(!Done()); | |
150 if (min != NULL) | |
151 *min = bucket_ranges_->range(index_); | |
152 if (max != NULL) | |
153 *max = bucket_ranges_->range(index_ + 1); | |
154 if (count != NULL) | |
155 *count = (*counts_)[index_]; | |
156 } | |
157 | |
158 void BucketHistogramSamplesIterator::ForwardToNextNonemptyBucket() { | |
159 CHECK(!Done()); | |
160 | |
161 while (index_ < static_cast<int>(counts_->size()) - 1) { | |
162 index_++; | |
163 if ((*counts_)[index_] != 0) | |
164 return; | |
165 } | |
166 is_done_ = true; | |
167 } | |
168 | |
34 // static | 169 // static |
35 const size_t Histogram::kBucketCount_MAX = 16384u; | 170 const size_t Histogram::kBucketCount_MAX = 16384u; |
36 | 171 |
37 Histogram::SampleSet::SampleSet(size_t size) | |
38 : counts_(size, 0), | |
39 sum_(0), | |
40 redundant_count_(0) {} | |
41 | |
42 Histogram::SampleSet::SampleSet() | |
43 : counts_(), | |
44 sum_(0), | |
45 redundant_count_(0) {} | |
46 | |
47 Histogram::SampleSet::~SampleSet() {} | |
48 | |
49 void Histogram::SampleSet::Resize(size_t size) { | |
50 counts_.resize(size, 0); | |
51 } | |
52 | |
53 void Histogram::SampleSet::Accumulate(Sample value, Count count, | |
54 size_t index) { | |
55 DCHECK(count == 1 || count == -1); | |
56 counts_[index] += count; | |
57 sum_ += count * value; | |
58 redundant_count_ += count; | |
59 DCHECK_GE(counts_[index], 0); | |
60 DCHECK_GE(sum_, 0); | |
61 DCHECK_GE(redundant_count_, 0); | |
62 } | |
63 | |
64 Count Histogram::SampleSet::TotalCount() const { | |
65 Count total = 0; | |
66 for (Counts::const_iterator it = counts_.begin(); | |
67 it != counts_.end(); | |
68 ++it) { | |
69 total += *it; | |
70 } | |
71 return total; | |
72 } | |
73 | |
74 void Histogram::SampleSet::Add(const SampleSet& other) { | |
75 DCHECK_EQ(counts_.size(), other.counts_.size()); | |
76 sum_ += other.sum_; | |
77 redundant_count_ += other.redundant_count_; | |
78 for (size_t index = 0; index < counts_.size(); ++index) | |
79 counts_[index] += other.counts_[index]; | |
80 } | |
81 | |
82 void Histogram::SampleSet::Subtract(const SampleSet& other) { | |
83 DCHECK_EQ(counts_.size(), other.counts_.size()); | |
84 // Note: Race conditions in snapshotting a sum may lead to (temporary) | |
85 // negative values when snapshots are later combined (and deltas calculated). | |
86 // As a result, we don't currently CHECK() for positive values. | |
87 sum_ -= other.sum_; | |
88 redundant_count_ -= other.redundant_count_; | |
89 for (size_t index = 0; index < counts_.size(); ++index) { | |
90 counts_[index] -= other.counts_[index]; | |
91 DCHECK_GE(counts_[index], 0); | |
92 } | |
93 } | |
94 | |
95 bool Histogram::SampleSet::Serialize(Pickle* pickle) const { | |
96 pickle->WriteInt64(sum_); | |
97 pickle->WriteInt64(redundant_count_); | |
98 pickle->WriteUInt64(counts_.size()); | |
99 | |
100 for (size_t index = 0; index < counts_.size(); ++index) { | |
101 pickle->WriteInt(counts_[index]); | |
102 } | |
103 | |
104 return true; | |
105 } | |
106 | |
107 bool Histogram::SampleSet::Deserialize(PickleIterator* iter) { | |
108 DCHECK_EQ(counts_.size(), 0u); | |
109 DCHECK_EQ(sum_, 0); | |
110 DCHECK_EQ(redundant_count_, 0); | |
111 | |
112 uint64 counts_size; | |
113 | |
114 if (!iter->ReadInt64(&sum_) || | |
115 !iter->ReadInt64(&redundant_count_) || | |
116 !iter->ReadUInt64(&counts_size)) { | |
117 return false; | |
118 } | |
119 | |
120 if (counts_size == 0) | |
121 return false; | |
122 | |
123 int count = 0; | |
124 for (uint64 index = 0; index < counts_size; ++index) { | |
125 int i; | |
126 if (!iter->ReadInt(&i)) | |
127 return false; | |
128 counts_.push_back(i); | |
129 count += i; | |
130 } | |
131 DCHECK_EQ(count, redundant_count_); | |
132 return count == redundant_count_; | |
133 } | |
134 | |
135 // TODO(rtenneti): delete this code after debugging. | 172 // TODO(rtenneti): delete this code after debugging. |
136 void CheckCorruption(const Histogram& histogram, bool new_histogram) { | 173 void CheckCorruption(const Histogram& histogram, bool new_histogram) { |
137 const std::string& histogram_name = histogram.histogram_name(); | 174 const std::string& histogram_name = histogram.histogram_name(); |
138 char histogram_name_buf[128]; | 175 char histogram_name_buf[128]; |
139 base::strlcpy(histogram_name_buf, | 176 base::strlcpy(histogram_name_buf, |
140 histogram_name.c_str(), | 177 histogram_name.c_str(), |
141 arraysize(histogram_name_buf)); | 178 arraysize(histogram_name_buf)); |
142 base::debug::Alias(histogram_name_buf); | 179 base::debug::Alias(histogram_name_buf); |
143 | 180 |
144 bool debug_new_histogram[1]; | 181 bool debug_new_histogram[1]; |
(...skipping 93 matching lines...) | |
238 if (next > current) | 275 if (next > current) |
239 current = next; | 276 current = next; |
240 else | 277 else |
241 ++current; // Just do a narrow bucket, and keep trying. | 278 ++current; // Just do a narrow bucket, and keep trying. |
242 ranges->set_range(bucket_index, current); | 279 ranges->set_range(bucket_index, current); |
243 } | 280 } |
244 ranges->set_range(ranges->size() - 1, HistogramBase::kSampleType_MAX); | 281 ranges->set_range(ranges->size() - 1, HistogramBase::kSampleType_MAX); |
245 ranges->ResetChecksum(); | 282 ranges->ResetChecksum(); |
246 } | 283 } |
247 | 284 |
248 // static | |
249 void Histogram::Add(int value) { | 285 void Histogram::Add(int value) { |
250 if (value > kSampleType_MAX - 1) | 286 if (value > ranges(bucket_count_) - 1) |
251 value = kSampleType_MAX - 1; | 287 value = ranges(bucket_count_) - 1; |
252 if (value < 0) | 288 if (value < ranges(0)) |
253 value = 0; | 289 value = ranges(0); |
254 size_t index = BucketIndex(value); | 290 samples_->Accumulate(value, 1); |
255 DCHECK_GE(value, ranges(index)); | |
256 DCHECK_LT(value, ranges(index + 1)); | |
257 Accumulate(value, 1, index); | |
258 } | 291 } |
259 | 292 |
260 void Histogram::AddBoolean(bool value) { | 293 void Histogram::AddBoolean(bool value) { |
261 DCHECK(false); | 294 DCHECK(false); |
262 } | 295 } |
263 | 296 |
264 void Histogram::AddSampleSet(const SampleSet& sample) { | 297 void Histogram::AddSamples(const HistogramSamples& samples) { |
265 sample_.Add(sample); | 298 samples_->Add(samples); |
299 } | |
300 | |
301 bool Histogram::AddSamplesFromPickle(PickleIterator* iter) { | |
302 return samples_->AddFromPickle(iter); | |
266 } | 303 } |
267 | 304 |
268 void Histogram::SetRangeDescriptions(const DescriptionPair descriptions[]) { | 305 void Histogram::SetRangeDescriptions(const DescriptionPair descriptions[]) { |
269 DCHECK(false); | 306 DCHECK(false); |
270 } | 307 } |
271 | 308 |
272 // The following methods provide a graphical histogram display. | 309 // The following methods provide a graphical histogram display. |
273 void Histogram::WriteHTMLGraph(string* output) const { | 310 void Histogram::WriteHTMLGraph(string* output) const { |
274 // TBD(jar) Write a nice HTML bar chart, with divs and mouse-overs etc. | 311 // TBD(jar) Write a nice HTML bar chart, with divs and mouse-overs etc. |
275 output->append("<PRE>"); | 312 output->append("<PRE>"); |
276 WriteAsciiImpl(true, "<br>", output); | 313 WriteAsciiImpl(true, "<br>", output); |
277 output->append("</PRE>"); | 314 output->append("</PRE>"); |
278 } | 315 } |
279 | 316 |
280 void Histogram::WriteAscii(string* output) const { | 317 void Histogram::WriteAscii(string* output) const { |
281 WriteAsciiImpl(true, "\n", output); | 318 WriteAsciiImpl(true, "\n", output); |
282 } | 319 } |
283 | 320 |
284 // static | 321 // static |
285 string Histogram::SerializeHistogramInfo(const Histogram& histogram, | 322 string Histogram::SerializeHistogramInfo(const Histogram& histogram, |
286 const SampleSet& snapshot) { | 323 const HistogramSamples& snapshot) { |
287 DCHECK_NE(NOT_VALID_IN_RENDERER, histogram.histogram_type()); | 324 DCHECK_NE(NOT_VALID_IN_RENDERER, histogram.histogram_type()); |
288 DCHECK(histogram.bucket_ranges()->HasValidChecksum()); | 325 DCHECK(histogram.bucket_ranges()->HasValidChecksum()); |
289 | 326 |
290 Pickle pickle; | 327 Pickle pickle; |
291 pickle.WriteString(histogram.histogram_name()); | 328 pickle.WriteString(histogram.histogram_name()); |
292 pickle.WriteInt(histogram.declared_min()); | 329 pickle.WriteInt(histogram.declared_min()); |
293 pickle.WriteInt(histogram.declared_max()); | 330 pickle.WriteInt(histogram.declared_max()); |
294 pickle.WriteUInt64(histogram.bucket_count()); | 331 pickle.WriteUInt64(histogram.bucket_count()); |
295 pickle.WriteUInt32(histogram.bucket_ranges()->checksum()); | 332 pickle.WriteUInt32(histogram.bucket_ranges()->checksum()); |
296 pickle.WriteInt(histogram.histogram_type()); | 333 pickle.WriteInt(histogram.histogram_type()); |
297 pickle.WriteInt(histogram.flags()); | 334 pickle.WriteInt(histogram.flags()); |
298 | 335 |
336 histogram.SerializeRanges(&pickle); | |
337 | |
299 snapshot.Serialize(&pickle); | 338 snapshot.Serialize(&pickle); |
300 | 339 |
301 histogram.SerializeRanges(&pickle); | |
302 | |
303 return string(static_cast<const char*>(pickle.data()), pickle.size()); | 340 return string(static_cast<const char*>(pickle.data()), pickle.size()); |
304 } | 341 } |
305 | 342 |
306 // static | 343 // static |
307 bool Histogram::DeserializeHistogramInfo(const string& histogram_info) { | 344 bool Histogram::DeserializeHistogramInfo(const string& histogram_info) { |
308 if (histogram_info.empty()) { | 345 if (histogram_info.empty()) { |
309 return false; | 346 return false; |
310 } | 347 } |
311 | 348 |
312 Pickle pickle(histogram_info.data(), | 349 Pickle pickle(histogram_info.data(), |
313 static_cast<int>(histogram_info.size())); | 350 static_cast<int>(histogram_info.size())); |
314 string histogram_name; | 351 string histogram_name; |
315 int declared_min; | 352 int declared_min; |
316 int declared_max; | 353 int declared_max; |
317 uint64 bucket_count; | 354 uint64 bucket_count; |
318 uint32 range_checksum; | 355 uint32 range_checksum; |
319 int histogram_type; | 356 int histogram_type; |
320 int pickle_flags; | 357 int pickle_flags; |
321 SampleSet sample; | |
322 | 358 |
323 PickleIterator iter(pickle); | 359 PickleIterator iter(pickle); |
324 if (!iter.ReadString(&histogram_name) || | 360 if (!iter.ReadString(&histogram_name) || |
325 !iter.ReadInt(&declared_min) || | 361 !iter.ReadInt(&declared_min) || |
326 !iter.ReadInt(&declared_max) || | 362 !iter.ReadInt(&declared_max) || |
327 !iter.ReadUInt64(&bucket_count) || | 363 !iter.ReadUInt64(&bucket_count) || |
328 !iter.ReadUInt32(&range_checksum) || | 364 !iter.ReadUInt32(&range_checksum) || |
329 !iter.ReadInt(&histogram_type) || | 365 !iter.ReadInt(&histogram_type) || |
330 !iter.ReadInt(&pickle_flags) || | 366 !iter.ReadInt(&pickle_flags)) { |
331 !sample.Histogram::SampleSet::Deserialize(&iter)) { | |
332 DLOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name; | 367 DLOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name; |
333 return false; | 368 return false; |
334 } | 369 } |
335 | 370 |
336 DCHECK(pickle_flags & kIPCSerializationSourceFlag); | 371 DCHECK(pickle_flags & kIPCSerializationSourceFlag); |
337 // Since these fields may have come from an untrusted renderer, do additional | 372 // Since these fields may have come from an untrusted renderer, do additional |
338 // checks above and beyond those in Histogram::Initialize() | 373 // checks above and beyond those in Histogram::Initialize() |
339 if (declared_max <= 0 || declared_min <= 0 || declared_max < declared_min || | 374 if (declared_max <= 0 || declared_min <= 0 || declared_max < declared_min || |
340 INT_MAX / sizeof(Count) <= bucket_count || bucket_count < 2) { | 375 INT_MAX / sizeof(Count) <= bucket_count || bucket_count < 2) { |
341 DLOG(ERROR) << "Values error decoding Histogram: " << histogram_name; | 376 DLOG(ERROR) << "Values error decoding Histogram: " << histogram_name; |
(...skipping 33 matching lines...) | |
375 DCHECK_EQ(render_histogram->bucket_count(), bucket_count); | 410 DCHECK_EQ(render_histogram->bucket_count(), bucket_count); |
376 DCHECK_EQ(render_histogram->histogram_type(), histogram_type); | 411 DCHECK_EQ(render_histogram->histogram_type(), histogram_type); |
377 | 412 |
378 if (render_histogram->bucket_ranges()->checksum() != range_checksum) { | 413 if (render_histogram->bucket_ranges()->checksum() != range_checksum) { |
379 return false; | 414 return false; |
380 } | 415 } |
381 | 416 |
382 if (render_histogram->flags() & kIPCSerializationSourceFlag) { | 417 if (render_histogram->flags() & kIPCSerializationSourceFlag) { |
383 DVLOG(1) << "Single process mode, histogram observed and not copied: " | 418 DVLOG(1) << "Single process mode, histogram observed and not copied: " |
384 << histogram_name; | 419 << histogram_name; |
385 } else { | 420 return true; |
386 DCHECK_EQ(flags & render_histogram->flags(), flags); | |
387 render_histogram->AddSampleSet(sample); | |
388 } | 421 } |
389 | 422 |
390 return true; | 423 DCHECK_EQ(flags & render_histogram->flags(), flags); |
424 return render_histogram->AddSamplesFromPickle(&iter); | |
391 } | 425 } |
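The field checks above treat everything read from the pickle as untrusted: the declared bounds must describe a non-empty, ordered range, and bucket_count must be small enough that allocating one Count per bucket cannot overflow (the INT_MAX / sizeof(Count) bound). A standalone sketch of an equivalent validation, with illustrative names that are not part of this CL:

#include <cassert>
#include <climits>
#include <cstdint>

// All parameters are treated as untrusted, as if read from a renderer pickle.
bool HistogramParamsLookSane(int declared_min, int declared_max,
                             uint64_t bucket_count) {
  if (declared_min <= 0 || declared_max <= 0 || declared_max < declared_min)
    return false;
  // Reject bucket counts that could overflow an allocation of int-sized counts.
  if (bucket_count < 2 || bucket_count >= INT_MAX / sizeof(int))
    return false;
  return true;
}

int main() {
  assert(HistogramParamsLookSane(1, 1000, 50));
  assert(!HistogramParamsLookSane(0, 1000, 50));          // min must be positive.
  assert(!HistogramParamsLookSane(1, 1000, 1));           // Too few buckets.
  assert(!HistogramParamsLookSane(1, 1000, UINT64_MAX));  // Would overflow.
  return 0;
}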
392 | 426 |
427 // static | |
428 const int Histogram::kCommonRaceBasedCountMismatch = 5; | |
393 | 429 |
394 // Validate a sample and related histogram. | |
395 Histogram::Inconsistencies Histogram::FindCorruption( | 430 Histogram::Inconsistencies Histogram::FindCorruption( |
396 const SampleSet& snapshot) const { | 431 const HistogramSamples& samples) const { |
397 int inconsistencies = NO_INCONSISTENCIES; | 432 int inconsistencies = NO_INCONSISTENCIES; |
398 Sample previous_range = -1; // Bottom range is always 0. | 433 Sample previous_range = -1; // Bottom range is always 0. |
399 int64 count = 0; | |
400 for (size_t index = 0; index < bucket_count(); ++index) { | 434 for (size_t index = 0; index < bucket_count(); ++index) { |
401 count += snapshot.counts(index); | |
402 int new_range = ranges(index); | 435 int new_range = ranges(index); |
403 if (previous_range >= new_range) | 436 if (previous_range >= new_range) |
404 inconsistencies |= BUCKET_ORDER_ERROR; | 437 inconsistencies |= BUCKET_ORDER_ERROR; |
405 previous_range = new_range; | 438 previous_range = new_range; |
406 } | 439 } |
407 | 440 |
408 if (!bucket_ranges()->HasValidChecksum()) | 441 if (!bucket_ranges()->HasValidChecksum()) |
409 inconsistencies |= RANGE_CHECKSUM_ERROR; | 442 inconsistencies |= RANGE_CHECKSUM_ERROR; |
410 | 443 |
411 int64 delta64 = snapshot.redundant_count() - count; | 444 int64 delta64 = samples.redundant_count() - samples.TotalCount(); |
412 if (delta64 != 0) { | 445 if (delta64 != 0) { |
413 int delta = static_cast<int>(delta64); | 446 int delta = static_cast<int>(delta64); |
414 if (delta != delta64) | 447 if (delta != delta64) |
415 delta = INT_MAX; // Flag all giant errors as INT_MAX. | 448 delta = INT_MAX; // Flag all giant errors as INT_MAX. |
416 // Since snapshots of histograms are taken asynchronously relative to | |
417 // sampling (and snapped from different threads), it is pretty likely that | |
418 // we'll catch a redundant count that doesn't match the sample count. We | |
419 // allow for a certain amount of slop before flagging this as an | |
420 // inconsistency. Even with an inconsistency, we'll snapshot it again (for | |
422 // UMA in about a half hour), so we'll eventually get the data, if it was | |
422 // not the result of a corruption. If histograms show that 1 is "too tight" | |
423 // then we may try to use 2 or 3 for this slop value. | |
424 const int kCommonRaceBasedCountMismatch = 5; | |
425 if (delta > 0) { | 449 if (delta > 0) { |
426 UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta); | 450 UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta); |
427 if (delta > kCommonRaceBasedCountMismatch) | 451 if (delta > kCommonRaceBasedCountMismatch) |
428 inconsistencies |= COUNT_HIGH_ERROR; | 452 inconsistencies |= COUNT_HIGH_ERROR; |
429 } else { | 453 } else { |
430 DCHECK_GT(0, delta); | 454 DCHECK_GT(0, delta); |
431 UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta); | 455 UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta); |
432 if (-delta > kCommonRaceBasedCountMismatch) | 456 if (-delta > kCommonRaceBasedCountMismatch) |
433 inconsistencies |= COUNT_LOW_ERROR; | 457 inconsistencies |= COUNT_LOW_ERROR; |
434 } | 458 } |
435 } | 459 } |
436 return static_cast<Inconsistencies>(inconsistencies); | 460 return static_cast<Inconsistencies>(inconsistencies); |
437 } | 461 } |
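The redundant-count check above boils down to comparing two counters that should agree whenever no races or corruption occurred: the total accumulated across buckets and the separately maintained redundant count. A standalone sketch of just that comparison, reusing the value of kCommonRaceBasedCountMismatch but with otherwise illustrative names and data:

#include <cassert>
#include <cstdint>
#include <numeric>
#include <vector>

// Returns true if |redundant_count| disagrees with the per-bucket total by more
// than the slop allowed for snapshots racing against in-progress samples.
bool CountsLookCorrupt(const std::vector<int>& bucket_counts,
                       int64_t redundant_count) {
  const int64_t kSlop = 5;  // Same value as kCommonRaceBasedCountMismatch.
  int64_t total = std::accumulate(bucket_counts.begin(), bucket_counts.end(),
                                  static_cast<int64_t>(0));
  int64_t delta = redundant_count - total;
  return delta > kSlop || -delta > kSlop;
}

int main() {
  assert(!CountsLookCorrupt({3, 4, 5}, 12));  // Exact match: consistent.
  assert(!CountsLookCorrupt({3, 4, 5}, 15));  // Off by 3: within the race slop.
  assert(CountsLookCorrupt({3, 4, 5}, 30));   // Off by 18: flagged.
  return 0;
}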
438 | 462 |
439 Histogram::ClassType Histogram::histogram_type() const { | 463 Histogram::ClassType Histogram::histogram_type() const { |
440 return HISTOGRAM; | 464 return HISTOGRAM; |
441 } | 465 } |
442 | 466 |
443 Sample Histogram::ranges(size_t i) const { | 467 Sample Histogram::ranges(size_t i) const { |
444 return bucket_ranges_->range(i); | 468 return bucket_ranges_->range(i); |
445 } | 469 } |
446 | 470 |
447 size_t Histogram::bucket_count() const { | 471 size_t Histogram::bucket_count() const { |
448 return bucket_count_; | 472 return bucket_count_; |
449 } | 473 } |
450 | 474 |
451 // Do a safe atomic snapshot of sample data. | 475 scoped_ptr<BucketHistogramSamples> Histogram::SnapshotSamples() const { |
452 // This implementation assumes we are on a safe single thread. | 476 scoped_ptr<BucketHistogramSamples> samples( |
453 void Histogram::SnapshotSample(SampleSet* sample) const { | 477 new BucketHistogramSamples(bucket_ranges())); |
454 // Note locking not done in this version!!! | 478 samples->Add(*samples_); |
455 *sample = sample_; | 479 return samples.Pass(); |
456 } | 480 } |
457 | 481 |
458 bool Histogram::HasConstructionArguments(Sample minimum, | 482 bool Histogram::HasConstructionArguments(Sample minimum, |
459 Sample maximum, | 483 Sample maximum, |
460 size_t bucket_count) { | 484 size_t bucket_count) { |
461 return ((minimum == declared_min_) && (maximum == declared_max_) && | 485 return ((minimum == declared_min_) && (maximum == declared_max_) && |
462 (bucket_count == bucket_count_)); | 486 (bucket_count == bucket_count_)); |
463 } | 487 } |
464 | 488 |
465 Histogram::Histogram(const string& name, | 489 Histogram::Histogram(const string& name, |
466 Sample minimum, | 490 Sample minimum, |
467 Sample maximum, | 491 Sample maximum, |
468 size_t bucket_count, | 492 size_t bucket_count, |
469 const BucketRanges* ranges) | 493 const BucketRanges* ranges) |
470 : HistogramBase(name), | 494 : HistogramBase(name), |
471 bucket_ranges_(ranges), | 495 bucket_ranges_(ranges), |
472 declared_min_(minimum), | 496 declared_min_(minimum), |
473 declared_max_(maximum), | 497 declared_max_(maximum), |
474 bucket_count_(bucket_count), | 498 bucket_count_(bucket_count) { |
475 sample_(bucket_count) {} | 499 if (ranges) |
500 samples_.reset(new BucketHistogramSamples(ranges)); | |
501 } | |
476 | 502 |
477 Histogram::~Histogram() { | 503 Histogram::~Histogram() { |
478 if (StatisticsRecorder::dump_on_exit()) { | 504 if (StatisticsRecorder::dump_on_exit()) { |
479 string output; | 505 string output; |
480 WriteAsciiImpl(true, "\n", &output); | 506 WriteAsciiImpl(true, "\n", &output); |
481 DLOG(INFO) << output; | 507 DLOG(INFO) << output; |
482 } | 508 } |
483 } | 509 } |
484 | 510 |
485 // static | 511 // static |
(...skipping 26 matching lines...) | |
512 } | 538 } |
513 | 539 |
514 bool Histogram::SerializeRanges(Pickle* pickle) const { | 540 bool Histogram::SerializeRanges(Pickle* pickle) const { |
515 return true; | 541 return true; |
516 } | 542 } |
517 | 543 |
518 bool Histogram::PrintEmptyBucket(size_t index) const { | 544 bool Histogram::PrintEmptyBucket(size_t index) const { |
519 return true; | 545 return true; |
520 } | 546 } |
521 | 547 |
522 size_t Histogram::BucketIndex(Sample value) const { | |
523 // Use simple binary search. This is very general, but there are better | |
524 // approaches if we knew that the buckets were linearly distributed. | |
525 DCHECK_LE(ranges(0), value); | |
526 DCHECK_GT(ranges(bucket_count()), value); | |
527 size_t under = 0; | |
528 size_t over = bucket_count(); | |
529 size_t mid; | |
530 | |
531 do { | |
532 DCHECK_GE(over, under); | |
533 mid = under + (over - under)/2; | |
534 if (mid == under) | |
535 break; | |
536 if (ranges(mid) <= value) | |
537 under = mid; | |
538 else | |
539 over = mid; | |
540 } while (true); | |
541 | |
542 DCHECK_LE(ranges(mid), value); | |
543 CHECK_GT(ranges(mid+1), value); | |
544 return mid; | |
545 } | |
546 | |
547 // Use the actual bucket widths (like a linear histogram) until the widths get | 548 // Use the actual bucket widths (like a linear histogram) until the widths get |
548 // over some transition value, and then use that transition width. Exponentials | 549 // over some transition value, and then use that transition width. Exponentials |
549 // get so big so fast (and we don't expect to see a lot of entries in the large | 550 // get so big so fast (and we don't expect to see a lot of entries in the large |
550 // buckets), so we need this to make it possible to see what is going on and | 551 // buckets), so we need this to make it possible to see what is going on and |
551 // not have 0-graphical-height buckets. | 552 // not have 0-graphical-height buckets. |
552 double Histogram::GetBucketSize(Count current, size_t i) const { | 553 double Histogram::GetBucketSize(Count current, size_t i) const { |
553 DCHECK_GT(ranges(i + 1), ranges(i)); | 554 DCHECK_GT(ranges(i + 1), ranges(i)); |
554 static const double kTransitionWidth = 5; | 555 static const double kTransitionWidth = 5; |
555 double denominator = ranges(i + 1) - ranges(i); | 556 double denominator = ranges(i + 1) - ranges(i); |
556 if (denominator > kTransitionWidth) | 557 if (denominator > kTransitionWidth) |
557 denominator = kTransitionWidth; // Stop trying to normalize. | 558 denominator = kTransitionWidth; // Stop trying to normalize. |
558 return current/denominator; | 559 return current/denominator; |
559 } | 560 } |
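For example, with a count of 10 in a bucket covering [8, 16), the true width 8 exceeds the transition width of 5, so the rendered size is 10 / 5 = 2; a narrow bucket covering [2, 3) keeps its width of 1 and renders as 10 / 1 = 10, which keeps counts in wide exponential buckets from vanishing in the graph.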
560 | 561 |
561 const string Histogram::GetAsciiBucketRange(size_t i) const { | 562 const string Histogram::GetAsciiBucketRange(size_t i) const { |
562 string result; | 563 string result; |
563 if (kHexRangePrintingFlag & flags()) | 564 if (kHexRangePrintingFlag & flags()) |
564 StringAppendF(&result, "%#x", ranges(i)); | 565 StringAppendF(&result, "%#x", ranges(i)); |
565 else | 566 else |
566 StringAppendF(&result, "%d", ranges(i)); | 567 StringAppendF(&result, "%d", ranges(i)); |
567 return result; | 568 return result; |
568 } | 569 } |
569 | 570 |
570 // Update histogram data with new sample. | |
571 void Histogram::Accumulate(Sample value, Count count, size_t index) { | |
572 // Note locking not done in this version!!! | |
573 sample_.Accumulate(value, count, index); | |
574 } | |
575 | |
576 //------------------------------------------------------------------------------ | 571 //------------------------------------------------------------------------------ |
577 // Private methods | 572 // Private methods |
578 | 573 |
579 void Histogram::WriteAsciiImpl(bool graph_it, | 574 void Histogram::WriteAsciiImpl(bool graph_it, |
580 const string& newline, | 575 const string& newline, |
581 string* output) const { | 576 string* output) const { |
582 // Get local (stack) copies of all effectively volatile class data so that we | 577 // Get local (stack) copies of all effectively volatile class data so that we |
583 // are consistent across our output activities. | 578 // are consistent across our output activities. |
584 SampleSet snapshot; | 579 scoped_ptr<BucketHistogramSamples> snapshot = SnapshotSamples(); |
585 SnapshotSample(&snapshot); | 580 Count sample_count = snapshot->TotalCount(); |
586 Count sample_count = snapshot.TotalCount(); | |
587 | 581 |
588 WriteAsciiHeader(snapshot, sample_count, output); | 582 WriteAsciiHeader(*snapshot, sample_count, output); |
589 output->append(newline); | 583 output->append(newline); |
590 | 584 |
591 // Prepare to normalize graphical rendering of bucket contents. | 585 // Prepare to normalize graphical rendering of bucket contents. |
592 double max_size = 0; | 586 double max_size = 0; |
593 if (graph_it) | 587 if (graph_it) |
594 max_size = GetPeakBucketSize(snapshot); | 588 max_size = GetPeakBucketSize(*snapshot); |
595 | 589 |
596 // Calculate space needed to print bucket range numbers. Leave room to print | 590 // Calculate space needed to print bucket range numbers. Leave room to print |
597 // nearly the largest bucket range without sliding over the histogram. | 591 // nearly the largest bucket range without sliding over the histogram. |
598 size_t largest_non_empty_bucket = bucket_count() - 1; | 592 size_t largest_non_empty_bucket = bucket_count() - 1; |
599 while (0 == snapshot.counts(largest_non_empty_bucket)) { | 593 while (0 == snapshot->GetCountFromBucketIndex(largest_non_empty_bucket)) { |
600 if (0 == largest_non_empty_bucket) | 594 if (0 == largest_non_empty_bucket) |
601 break; // All buckets are empty. | 595 break; // All buckets are empty. |
602 --largest_non_empty_bucket; | 596 --largest_non_empty_bucket; |
603 } | 597 } |
604 | 598 |
605 // Calculate largest print width needed for any of our bucket range displays. | 599 // Calculate largest print width needed for any of our bucket range displays. |
606 size_t print_width = 1; | 600 size_t print_width = 1; |
607 for (size_t i = 0; i < bucket_count(); ++i) { | 601 for (size_t i = 0; i < bucket_count(); ++i) { |
608 if (snapshot.counts(i)) { | 602 if (snapshot->GetCountFromBucketIndex(i)) { |
609 size_t width = GetAsciiBucketRange(i).size() + 1; | 603 size_t width = GetAsciiBucketRange(i).size() + 1; |
610 if (width > print_width) | 604 if (width > print_width) |
611 print_width = width; | 605 print_width = width; |
612 } | 606 } |
613 } | 607 } |
614 | 608 |
615 int64 remaining = sample_count; | 609 int64 remaining = sample_count; |
616 int64 past = 0; | 610 int64 past = 0; |
617 // Output the actual histogram graph. | 611 // Output the actual histogram graph. |
618 for (size_t i = 0; i < bucket_count(); ++i) { | 612 for (size_t i = 0; i < bucket_count(); ++i) { |
619 Count current = snapshot.counts(i); | 613 Count current = snapshot->GetCountFromBucketIndex(i); |
620 if (!current && !PrintEmptyBucket(i)) | 614 if (!current && !PrintEmptyBucket(i)) |
621 continue; | 615 continue; |
622 remaining -= current; | 616 remaining -= current; |
623 string range = GetAsciiBucketRange(i); | 617 string range = GetAsciiBucketRange(i); |
624 output->append(range); | 618 output->append(range); |
625 for (size_t j = 0; range.size() + j < print_width + 1; ++j) | 619 for (size_t j = 0; range.size() + j < print_width + 1; ++j) |
626 output->push_back(' '); | 620 output->push_back(' '); |
627 if (0 == current && i < bucket_count() - 1 && 0 == snapshot.counts(i + 1)) { | 621 if (0 == current && i < bucket_count() - 1 && |
628 while (i < bucket_count() - 1 && 0 == snapshot.counts(i + 1)) | 622 0 == snapshot->GetCountFromBucketIndex(i + 1)) { |
623 while (i < bucket_count() - 1 && | |
624 0 == snapshot->GetCountFromBucketIndex(i + 1)) { | |
629 ++i; | 625 ++i; |
626 } | |
630 output->append("... "); | 627 output->append("... "); |
631 output->append(newline); | 628 output->append(newline); |
632 continue; // No reason to plot emptiness. | 629 continue; // No reason to plot emptiness. |
633 } | 630 } |
634 double current_size = GetBucketSize(current, i); | 631 double current_size = GetBucketSize(current, i); |
635 if (graph_it) | 632 if (graph_it) |
636 WriteAsciiBucketGraph(current_size, max_size, output); | 633 WriteAsciiBucketGraph(current_size, max_size, output); |
637 WriteAsciiBucketContext(past, current, remaining, i, output); | 634 WriteAsciiBucketContext(past, current, remaining, i, output); |
638 output->append(newline); | 635 output->append(newline); |
639 past += current; | 636 past += current; |
640 } | 637 } |
641 DCHECK_EQ(sample_count, past); | 638 DCHECK_EQ(sample_count, past); |
642 } | 639 } |
643 | 640 |
644 double Histogram::GetPeakBucketSize(const SampleSet& snapshot) const { | 641 double Histogram::GetPeakBucketSize( |
642 const BucketHistogramSamples& samples) const { | |
645 double max = 0; | 643 double max = 0; |
646 for (size_t i = 0; i < bucket_count() ; ++i) { | 644 for (size_t i = 0; i < bucket_count() ; ++i) { |
647 double current_size = GetBucketSize(snapshot.counts(i), i); | 645 double current_size = GetBucketSize(samples.GetCountFromBucketIndex(i), i); |
648 if (current_size > max) | 646 if (current_size > max) |
649 max = current_size; | 647 max = current_size; |
650 } | 648 } |
651 return max; | 649 return max; |
652 } | 650 } |
653 | 651 |
654 void Histogram::WriteAsciiHeader(const SampleSet& snapshot, | 652 void Histogram::WriteAsciiHeader(const BucketHistogramSamples& samples, |
655 Count sample_count, | 653 Count sample_count, |
656 string* output) const { | 654 string* output) const { |
657 StringAppendF(output, | 655 StringAppendF(output, |
658 "Histogram: %s recorded %d samples", | 656 "Histogram: %s recorded %d samples", |
659 histogram_name().c_str(), | 657 histogram_name().c_str(), |
660 sample_count); | 658 sample_count); |
661 if (0 == sample_count) { | 659 if (0 == sample_count) { |
662 DCHECK_EQ(snapshot.sum(), 0); | 660 DCHECK_EQ(samples.sum(), 0); |
663 } else { | 661 } else { |
664 double average = static_cast<float>(snapshot.sum()) / sample_count; | 662 double average = static_cast<float>(samples.sum()) / sample_count; |
665 | 663 |
666 StringAppendF(output, ", average = %.1f", average); | 664 StringAppendF(output, ", average = %.1f", average); |
667 } | 665 } |
668 if (flags() & ~kHexRangePrintingFlag) | 666 if (flags() & ~kHexRangePrintingFlag) |
669 StringAppendF(output, " (flags = 0x%x)", flags() & ~kHexRangePrintingFlag); | 667 StringAppendF(output, " (flags = 0x%x)", flags() & ~kHexRangePrintingFlag); |
670 } | 668 } |
671 | 669 |
672 void Histogram::WriteAsciiBucketContext(const int64 past, | 670 void Histogram::WriteAsciiBucketContext(const int64 past, |
673 const Count current, | 671 const Count current, |
674 const int64 remaining, | 672 const int64 remaining, |
(...skipping 282 matching lines...) | |
957 | 955 |
958 BucketRanges* bucket_ranges = new BucketRanges(ranges.size()); | 956 BucketRanges* bucket_ranges = new BucketRanges(ranges.size()); |
959 for (size_t i = 0; i < ranges.size(); i++) { | 957 for (size_t i = 0; i < ranges.size(); i++) { |
960 bucket_ranges->set_range(i, ranges[i]); | 958 bucket_ranges->set_range(i, ranges[i]); |
961 } | 959 } |
962 bucket_ranges->ResetChecksum(); | 960 bucket_ranges->ResetChecksum(); |
963 return bucket_ranges; | 961 return bucket_ranges; |
964 } | 962 } |
965 | 963 |
966 } // namespace base | 964 } // namespace base |