Index: base/metrics/histogram.cc
===================================================================
--- base/metrics/histogram.cc	(revision 155400)
+++ base/metrics/histogram.cc	(working copy)
@@ -34,104 +34,6 @@
 // static
 const size_t Histogram::kBucketCount_MAX = 16384u;
 
-Histogram::SampleSet::SampleSet(size_t size)
-    : counts_(size, 0),
-      sum_(0),
-      redundant_count_(0) {}
-
-Histogram::SampleSet::SampleSet()
-    : counts_(),
-      sum_(0),
-      redundant_count_(0) {}
-
-Histogram::SampleSet::~SampleSet() {}
-
-void Histogram::SampleSet::Resize(size_t size) {
-  counts_.resize(size, 0);
-}
-
-void Histogram::SampleSet::Accumulate(Sample value, Count count,
-                                      size_t index) {
-  DCHECK(count == 1 || count == -1);
-  counts_[index] += count;
-  sum_ += count * value;
-  redundant_count_ += count;
-  DCHECK_GE(counts_[index], 0);
-  DCHECK_GE(sum_, 0);
-  DCHECK_GE(redundant_count_, 0);
-}
-
-Count Histogram::SampleSet::TotalCount() const {
-  Count total = 0;
-  for (Counts::const_iterator it = counts_.begin();
-       it != counts_.end();
-       ++it) {
-    total += *it;
-  }
-  return total;
-}
-
-void Histogram::SampleSet::Add(const SampleSet& other) {
-  DCHECK_EQ(counts_.size(), other.counts_.size());
-  sum_ += other.sum_;
-  redundant_count_ += other.redundant_count_;
-  for (size_t index = 0; index < counts_.size(); ++index)
-    counts_[index] += other.counts_[index];
-}
-
-void Histogram::SampleSet::Subtract(const SampleSet& other) {
-  DCHECK_EQ(counts_.size(), other.counts_.size());
-  // Note: Race conditions in snapshotting a sum may lead to (temporary)
-  // negative values when snapshots are later combined (and deltas calculated).
-  // As a result, we don't currently CHECK() for positive values.
-  sum_ -= other.sum_;
-  redundant_count_ -= other.redundant_count_;
-  for (size_t index = 0; index < counts_.size(); ++index) {
-    counts_[index] -= other.counts_[index];
-    DCHECK_GE(counts_[index], 0);
-  }
-}
-
-bool Histogram::SampleSet::Serialize(Pickle* pickle) const {
-  pickle->WriteInt64(sum_);
-  pickle->WriteInt64(redundant_count_);
-  pickle->WriteUInt64(counts_.size());
-
-  for (size_t index = 0; index < counts_.size(); ++index) {
-    pickle->WriteInt(counts_[index]);
-  }
-
-  return true;
-}
-
-bool Histogram::SampleSet::Deserialize(PickleIterator* iter) {
-  DCHECK_EQ(counts_.size(), 0u);
-  DCHECK_EQ(sum_, 0);
-  DCHECK_EQ(redundant_count_, 0);
-
-  uint64 counts_size;
-
-  if (!iter->ReadInt64(&sum_) ||
-      !iter->ReadInt64(&redundant_count_) ||
-      !iter->ReadUInt64(&counts_size)) {
-    return false;
-  }
-
-  if (counts_size == 0)
-    return false;
-
-  int count = 0;
-  for (uint64 index = 0; index < counts_size; ++index) {
-    int i;
-    if (!iter->ReadInt(&i))
-      return false;
-    counts_.push_back(i);
-    count += i;
-  }
-  DCHECK_EQ(count, redundant_count_);
-  return count == redundant_count_;
-}
-
 // TODO(rtenneti): delete this code after debugging.
 void CheckCorruption(const Histogram& histogram, bool new_histogram) {
   const std::string& histogram_name = histogram.histogram_name();
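The SampleSet machinery removed above is superseded by HistogramSamples and its bucketed implementation SampleVector. The sketch below is inferred purely from the call sites in this patch (samples_->Accumulate(value, 1), Add(), AddFromPickle(), TotalCount(), sum(), redundant_count(), GetCountAtIndex()); the authoritative declarations live in base/metrics/histogram_samples.h and base/metrics/sample_vector.h and may differ in detail.

```cpp
// Hedged sketch only -- inferred from this patch's call sites, not copied
// from the real headers. Sample and Count are Histogram's int typedefs.
class HistogramSamples {
 public:
  virtual ~HistogramSamples() {}

  // No bucket-index parameter anymore: implementations locate the bucket
  // themselves, which is why Histogram::BucketIndex() can be deleted below.
  virtual void Accumulate(Sample value, Count count) = 0;
  virtual Count TotalCount() const = 0;

  void Add(const HistogramSamples& other);   // merge another sample set
  bool Serialize(Pickle* pickle) const;      // sum, redundant count, buckets
  bool AddFromPickle(PickleIterator* iter);  // deserialize and merge, one pass

  int64 sum() const { return sum_; }
  Count redundant_count() const { return redundant_count_; }

 protected:
  int64 sum_;
  Count redundant_count_;
};

// The bucketed implementation: its counts array is sized from a BucketRanges,
// so SampleSet::Resize() has no equivalent.
class SampleVector : public HistogramSamples {
 public:
  explicit SampleVector(const BucketRanges* bucket_ranges);
  Count GetCountAtIndex(size_t index) const;
};
```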
@@ -245,26 +147,29 @@
   ranges->ResetChecksum();
 }
 
-// static
 void Histogram::Add(int value) {
+  DCHECK_EQ(0, ranges(0));
+  DCHECK_EQ(kSampleType_MAX, ranges(bucket_count_));
+
   if (value > kSampleType_MAX - 1)
     value = kSampleType_MAX - 1;
   if (value < 0)
     value = 0;
-  size_t index = BucketIndex(value);
-  DCHECK_GE(value, ranges(index));
-  DCHECK_LT(value, ranges(index + 1));
-  Accumulate(value, 1, index);
+  samples_->Accumulate(value, 1);
 }
 
 void Histogram::AddBoolean(bool value) {
   DCHECK(false);
 }
 
-void Histogram::AddSampleSet(const SampleSet& sample) {
-  sample_.Add(sample);
+void Histogram::AddSamples(const HistogramSamples& samples) {
+  samples_->Add(samples);
 }
 
+bool Histogram::AddSamplesFromPickle(PickleIterator* iter) {
+  return samples_->AddFromPickle(iter);
+}
+
 void Histogram::SetRangeDescriptions(const DescriptionPair descriptions[]) {
   DCHECK(false);
 }
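Add() keeps its saturating behavior: out-of-range values are folded into the end buckets rather than dropped, and only the bucket lookup moves into the samples object. A minimal standalone illustration of the clamp (kSampleType_MAX is INT_MAX in this codebase, to the best of my knowledge):

```cpp
#include <cassert>
#include <climits>

typedef int Sample;

// Assumption stated above: kSampleType_MAX mirrors INT_MAX. The clamp folds
// overflow into the top bucket and negative values into the bottom one.
const Sample kSampleType_MAX = INT_MAX;

Sample ClampSample(Sample value) {
  if (value > kSampleType_MAX - 1)
    value = kSampleType_MAX - 1;
  if (value < 0)
    value = 0;
  return value;
}

int main() {
  assert(ClampSample(-5) == 0);                                 // underflow
  assert(ClampSample(42) == 42);                                // in range
  assert(ClampSample(kSampleType_MAX) == kSampleType_MAX - 1);  // overflow
  return 0;
}
```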
@@ -283,7 +188,7 @@
 
 // static
 string Histogram::SerializeHistogramInfo(const Histogram& histogram,
-                                         const SampleSet& snapshot) {
+                                         const HistogramSamples& snapshot) {
   DCHECK_NE(NOT_VALID_IN_RENDERER, histogram.histogram_type());
   DCHECK(histogram.bucket_ranges()->HasValidChecksum());
 
@@ -296,10 +201,10 @@
   pickle.WriteInt(histogram.histogram_type());
   pickle.WriteInt(histogram.flags());
 
-  snapshot.Serialize(&pickle);
-
   histogram.SerializeRanges(&pickle);
 
+  snapshot.Serialize(&pickle);
+
   return string(static_cast<const char*>(pickle.data()), pickle.size());
 }
 
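The reorder above is load-bearing: ranges now precede samples in the pickle, so DeserializeHistogramInfo() (next hunks) can locate or create the destination histogram from the header and ranges first, then stream the sample block straight into it via AddSamplesFromPickle(). A condensed, hedged restatement of the read side (FindOrCreateFromHeaderAndRanges is a hypothetical stand-in for the factory logic):

```cpp
// Sketch of the read order implied by the hunks below; not the literal code.
PickleIterator iter(pickle);
// 1. Header: name, declared min/max, bucket count, checksum, type, flags.
// 2. Ranges: enough to find or build the destination histogram.
// 3. Samples: last, so they can be merged in place without a temporary.
Histogram* histogram = FindOrCreateFromHeaderAndRanges(&iter);  // hypothetical
if (!histogram)
  return false;
return histogram->AddSamplesFromPickle(&iter);  // consumes the sample block
```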
@@ -318,7 +223,6 @@
   uint32 range_checksum;
   int histogram_type;
   int pickle_flags;
-  SampleSet sample;
 
   PickleIterator iter(pickle);
   if (!iter.ReadString(&histogram_name) ||
@@ -327,8 +231,7 @@
       !iter.ReadUInt64(&bucket_count) ||
       !iter.ReadUInt32(&range_checksum) ||
       !iter.ReadInt(&histogram_type) ||
-      !iter.ReadInt(&pickle_flags) ||
-      !sample.Histogram::SampleSet::Deserialize(&iter)) {
+      !iter.ReadInt(&pickle_flags)) {
     DLOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name;
     return false;
   }
@@ -382,23 +285,21 @@
   if (render_histogram->flags() & kIPCSerializationSourceFlag) {
     DVLOG(1) << "Single process mode, histogram observed and not copied: "
              << histogram_name;
-  } else {
-    DCHECK_EQ(flags & render_histogram->flags(), flags);
-    render_histogram->AddSampleSet(sample);
+    return true;
   }
 
-  return true;
+  DCHECK_EQ(flags & render_histogram->flags(), flags);
+  return render_histogram->AddSamplesFromPickle(&iter);
 }
 
+// static
+const int Histogram::kCommonRaceBasedCountMismatch = 5;
 
-// Validate a sample and related histogram.
 Histogram::Inconsistencies Histogram::FindCorruption(
-    const SampleSet& snapshot) const {
+    const HistogramSamples& samples) const {
   int inconsistencies = NO_INCONSISTENCIES;
   Sample previous_range = -1;  // Bottom range is always 0.
-  int64 count = 0;
   for (size_t index = 0; index < bucket_count(); ++index) {
-    count += snapshot.counts(index);
     int new_range = ranges(index);
     if (previous_range >= new_range)
       inconsistencies |= BUCKET_ORDER_ERROR;
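Worth noting about the hunk above: the old code eagerly deserialized samples into a stack SampleSet even on the single-process path, which then discarded them. With the early return, the pickle's sample block is only parsed when it will actually be merged. Schematically (both halves paraphrase this patch, not new API):

```cpp
// Before: always pays the deserialization cost, even when samples are
// about to be thrown away on the single-process path.
SampleSet sample;
sample.Deserialize(&iter);
// ... single-process check ...
render_histogram->AddSampleSet(sample);

// After: the iterator is handed to the target histogram only when the
// samples will be merged; the single-process path returns before parsing.
return render_histogram->AddSamplesFromPickle(&iter);
```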
@@ -408,20 +309,11 @@
   if (!bucket_ranges()->HasValidChecksum())
     inconsistencies |= RANGE_CHECKSUM_ERROR;
 
-  int64 delta64 = snapshot.redundant_count() - count;
+  int64 delta64 = samples.redundant_count() - samples.TotalCount();
   if (delta64 != 0) {
     int delta = static_cast<int>(delta64);
     if (delta != delta64)
       delta = INT_MAX;  // Flag all giant errors as INT_MAX.
-    // Since snapshots of histograms are taken asynchronously relative to
-    // sampling (and snapped from different threads), it is pretty likely that
-    // we'll catch a redundant count that doesn't match the sample count. We
-    // allow for a certain amount of slop before flagging this as an
-    // inconsistency. Even with an inconsistency, we'll snapshot it again (for
-    // UMA) in about a half hour, so we'll eventually get the data, if it was
-    // not the result of a corruption. If histograms show that 1 is "too tight"
-    // then we may try to use 2 or 3 for this slop value.
-    const int kCommonRaceBasedCountMismatch = 5;
     if (delta > 0) {
       UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta);
       if (delta > kCommonRaceBasedCountMismatch)
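The slop constant moves from a function local to the class-level kCommonRaceBasedCountMismatch introduced earlier in this patch (still 5), and FindCorruption() now compares redundant_count() against the TotalCount() computed by the samples object itself. A standalone sketch of the tolerance check, under the assumption (from the deleted comment) that lock-free snapshots may legitimately disagree by a few samples:

```cpp
#include <cstdint>
#include <cstdlib>

// redundant_count is bumped on every Accumulate(), so in a race-free world
// it equals the sum of all bucket counts; small deltas are tolerated.
const int kCommonRaceBasedCountMismatch = 5;  // mirrors the class constant

bool DeltaLooksCorrupt(int64_t redundant_count, int64_t total_count) {
  return std::llabs(redundant_count - total_count) >
         kCommonRaceBasedCountMismatch;
}
```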
@@ -448,11 +340,10 @@
   return bucket_count_;
 }
 
-// Do a safe atomic snapshot of sample data.
-// This implementation assumes we are on a safe single thread.
-void Histogram::SnapshotSample(SampleSet* sample) const {
-  // Note locking not done in this version!!!
-  *sample = sample_;
+scoped_ptr<SampleVector> Histogram::SnapshotSamples() const {
+  scoped_ptr<SampleVector> samples(new SampleVector(bucket_ranges()));
+  samples->Add(*samples_);
+  return samples.Pass();
 }
 
 bool Histogram::HasConstructionArguments(Sample minimum,
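SnapshotSamples() replaces the copy-into-out-param SnapshotSample(); the caller now owns the copy. A hedged usage sketch matching how WriteAsciiImpl() uses it later in this patch (ProcessBucket is a hypothetical stand-in for caller code):

```cpp
// Once the copy is taken, reads come from the snapshot, so concurrent Add()
// calls cannot skew a single report. (Taking the copy itself is still not
// locked, same as the code it replaces.)
scoped_ptr<SampleVector> snapshot = histogram->SnapshotSamples();
Count sample_count = snapshot->TotalCount();
for (size_t i = 0; i < histogram->bucket_count(); ++i)
  ProcessBucket(i, snapshot->GetCountAtIndex(i));  // hypothetical caller code
```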
@@ -471,8 +362,10 @@
     bucket_ranges_(ranges),
     declared_min_(minimum),
     declared_max_(maximum),
-    bucket_count_(bucket_count),
-    sample_(bucket_count) {}
+    bucket_count_(bucket_count) {
+  if (ranges)
+    samples_.reset(new SampleVector(ranges));
+}
 
 Histogram::~Histogram() {
   if (StatisticsRecorder::dump_on_exit()) {
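The constructor now builds the sample storage from the BucketRanges rather than a raw bucket count, with a guard for a null ranges pointer (presumably for subclasses that defer range setup; that reading is an inference from this hunk alone). The appeal of keying storage off the ranges, in a minimal standalone form:

```cpp
#include <cstddef>
#include <vector>

// Standalone sketch of the idea, not the real classes: sizing the counts
// array from the ranges means the two can never disagree, so the old
// Resize()/bucket_count bookkeeping disappears.
struct BucketRangesLite {
  std::vector<int> boundaries;  // bucket_count + 1 boundary values
  size_t bucket_count() const { return boundaries.size() - 1; }
};

class SampleVectorLite {
 public:
  explicit SampleVectorLite(const BucketRangesLite* ranges)
      : ranges_(ranges), counts_(ranges->bucket_count(), 0) {}

 private:
  const BucketRangesLite* ranges_;
  std::vector<int> counts_;
};
```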
@@ -519,31 +412,6 @@
   return true;
 }
 
-size_t Histogram::BucketIndex(Sample value) const {
-  // Use simple binary search. This is very general, but there are better
-  // approaches if we knew that the buckets were linearly distributed.
-  DCHECK_LE(ranges(0), value);
-  DCHECK_GT(ranges(bucket_count()), value);
-  size_t under = 0;
-  size_t over = bucket_count();
-  size_t mid;
-
-  do {
-    DCHECK_GE(over, under);
-    mid = under + (over - under)/2;
-    if (mid == under)
-      break;
-    if (ranges(mid) <= value)
-      under = mid;
-    else
-      over = mid;
-  } while (true);
-
-  DCHECK_LE(ranges(mid), value);
-  CHECK_GT(ranges(mid+1), value);
-  return mid;
-}
-
 // Use the actual bucket widths (like a linear histogram) until the widths get
 // over some transition value, and then use that transition width. Exponentials
 // get so big so fast (and we don't expect to see a lot of entries in the large
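The binary search deleted here does not vanish; equivalent value-to-bucket lookup now lives with the sample storage, next to the counts it indexes. A standalone sketch of the same algorithm over half-open buckets [ranges[i], ranges[i+1]):

```cpp
#include <cassert>
#include <cstddef>

// Same invariant as the deleted code: returns mid such that
// ranges[mid] <= value < ranges[mid + 1].
size_t BucketIndex(const int ranges[], size_t bucket_count, int value) {
  size_t under = 0;
  size_t over = bucket_count;
  for (;;) {
    size_t mid = under + (over - under) / 2;
    if (mid == under)
      return mid;
    if (ranges[mid] <= value)
      under = mid;   // value is at or above this boundary
    else
      over = mid;    // value is below this boundary
  }
}

int main() {
  const int ranges[] = {0, 1, 2, 4, 8, 16};  // 5 buckets
  assert(BucketIndex(ranges, 5, 0) == 0);
  assert(BucketIndex(ranges, 5, 3) == 2);    // 3 lies in [2, 4)
  assert(BucketIndex(ranges, 5, 15) == 4);   // 15 lies in [8, 16)
  return 0;
}
```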
@@ -567,12 +435,6 @@
   return result;
 }
 
-// Update histogram data with new sample.
-void Histogram::Accumulate(Sample value, Count count, size_t index) {
-  // Note locking not done in this version!!!
-  sample_.Accumulate(value, count, index);
-}
-
 //------------------------------------------------------------------------------
 // Private methods
 
@@ -581,22 +443,21 @@
                                string* output) const {
   // Get local (stack) copies of all effectively volatile class data so that we
   // are consistent across our output activities.
-  SampleSet snapshot;
-  SnapshotSample(&snapshot);
-  Count sample_count = snapshot.TotalCount();
+  scoped_ptr<SampleVector> snapshot = SnapshotSamples();
+  Count sample_count = snapshot->TotalCount();
 
-  WriteAsciiHeader(snapshot, sample_count, output);
+  WriteAsciiHeader(*snapshot, sample_count, output);
   output->append(newline);
 
   // Prepare to normalize graphical rendering of bucket contents.
   double max_size = 0;
   if (graph_it)
-    max_size = GetPeakBucketSize(snapshot);
+    max_size = GetPeakBucketSize(*snapshot);
 
   // Calculate space needed to print bucket range numbers. Leave room to print
   // nearly the largest bucket range without sliding over the histogram.
   size_t largest_non_empty_bucket = bucket_count() - 1;
-  while (0 == snapshot.counts(largest_non_empty_bucket)) {
+  while (0 == snapshot->GetCountAtIndex(largest_non_empty_bucket)) {
     if (0 == largest_non_empty_bucket)
       break;  // All buckets are empty.
     --largest_non_empty_bucket;
@@ -605,7 +466,7 @@
   // Calculate largest print width needed for any of our bucket range displays.
   size_t print_width = 1;
   for (size_t i = 0; i < bucket_count(); ++i) {
-    if (snapshot.counts(i)) {
+    if (snapshot->GetCountAtIndex(i)) {
       size_t width = GetAsciiBucketRange(i).size() + 1;
       if (width > print_width)
         print_width = width;
@@ -616,7 +477,7 @@
   int64 past = 0;
   // Output the actual histogram graph.
   for (size_t i = 0; i < bucket_count(); ++i) {
-    Count current = snapshot.counts(i);
+    Count current = snapshot->GetCountAtIndex(i);
     if (!current && !PrintEmptyBucket(i))
       continue;
     remaining -= current;
@@ -624,9 +485,12 @@
     output->append(range);
     for (size_t j = 0; range.size() + j < print_width + 1; ++j)
       output->push_back(' ');
-    if (0 == current && i < bucket_count() - 1 && 0 == snapshot.counts(i + 1)) {
-      while (i < bucket_count() - 1 && 0 == snapshot.counts(i + 1))
+    if (0 == current && i < bucket_count() - 1 &&
+        0 == snapshot->GetCountAtIndex(i + 1)) {
+      while (i < bucket_count() - 1 &&
+             0 == snapshot->GetCountAtIndex(i + 1)) {
         ++i;
+      }
       output->append("... ");
       output->append(newline);
       continue;  // No reason to plot emptiness.
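The hunk above only re-wraps long lines for the new accessor name; behavior is unchanged: a run of two or more consecutive empty buckets still collapses to a single "... " line. A simplified standalone version of that loop (the real code also consults PrintEmptyBucket() before this point):

```cpp
#include <cstddef>
#include <cstdio>

// Prints one line per bucket, but collapses a run of >= 2 empty buckets
// into a single "..." line, as the histogram's ASCII graph does.
void PrintCounts(const int counts[], size_t n) {
  for (size_t i = 0; i < n; ++i) {
    if (counts[i] == 0 && i + 1 < n && counts[i + 1] == 0) {
      while (i + 1 < n && counts[i + 1] == 0)
        ++i;  // swallow the rest of the empty run
      printf("...\n");
      continue;
    }
    printf("bucket %u: %d\n", static_cast<unsigned>(i), counts[i]);
  }
}

int main() {
  const int counts[] = {3, 0, 0, 0, 7};
  PrintCounts(counts, 5);  // prints: bucket 0: 3 / ... / bucket 4: 7
  return 0;
}
```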
@@ -641,17 +505,17 @@
   DCHECK_EQ(sample_count, past);
 }
 
-double Histogram::GetPeakBucketSize(const SampleSet& snapshot) const {
+double Histogram::GetPeakBucketSize(const SampleVector& samples) const {
   double max = 0;
   for (size_t i = 0; i < bucket_count() ; ++i) {
-    double current_size = GetBucketSize(snapshot.counts(i), i);
+    double current_size = GetBucketSize(samples.GetCountAtIndex(i), i);
     if (current_size > max)
       max = current_size;
   }
   return max;
 }
 
-void Histogram::WriteAsciiHeader(const SampleSet& snapshot,
+void Histogram::WriteAsciiHeader(const SampleVector& samples,
                                  Count sample_count,
                                  string* output) const {
   StringAppendF(output,
@@ -659,9 +523,9 @@
                 histogram_name().c_str(),
                 sample_count);
   if (0 == sample_count) {
-    DCHECK_EQ(snapshot.sum(), 0);
+    DCHECK_EQ(samples.sum(), 0);
   } else {
-    double average = static_cast<float>(snapshot.sum()) / sample_count;
+    double average = static_cast<float>(samples.sum()) / sample_count;
 
     StringAppendF(output, ", average = %.1f", average);
   }