Chromium Code Reviews

Side by Side Diff: content/common/gpu/gpu_memory_manager.cc

Issue 10854076: Add GPU memory tab to the task manager. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Incorporate review feedback Created 8 years, 4 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/gpu/gpu_memory_manager.h"

 #if defined(ENABLE_GPU)

 #include <algorithm>

 #include "base/bind.h"
 #include "base/command_line.h"
 #include "base/debug/trace_event.h"
 #include "base/message_loop.h"
+#include "base/process_util.h"
 #include "base/string_number_conversions.h"
 #include "content/common/gpu/gpu_command_buffer_stub.h"
 #include "content/common/gpu/gpu_memory_allocation.h"
+#include "content/common/gpu/gpu_memory_tracking.h"
 #include "gpu/command_buffer/service/gpu_switches.h"

 namespace {

 const int kDelayedScheduleManageTimeoutMs = 67;

 bool IsInSameContextShareGroupAsAnyOf(
     const GpuCommandBufferStubBase* stub,
     const std::vector<GpuCommandBufferStubBase*>& stubs) {
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
(...skipping 57 matching lines...)
   } else {
 #if defined(OS_ANDROID)
     bytes_available_gpu_memory_ = 64 * 1024 * 1024;
 #else
     bytes_available_gpu_memory_ = 448 * 1024 * 1024;
 #endif
   }
 }

 GpuMemoryManager::~GpuMemoryManager() {
+  DCHECK(tracking_groups_.empty());
 }

 bool GpuMemoryManager::StubWithSurfaceComparator::operator()(
     GpuCommandBufferStubBase* lhs,
     GpuCommandBufferStubBase* rhs) {
   DCHECK(lhs->has_surface_state() && rhs->has_surface_state());
   const GpuCommandBufferStubBase::SurfaceState& lhs_ss = lhs->surface_state();
   const GpuCommandBufferStubBase::SurfaceState& rhs_ss = rhs->surface_state();
   if (lhs_ss.visible)
     return !rhs_ss.visible || (lhs_ss.last_used_time > rhs_ss.last_used_time);
(...skipping 17 matching lines...)
     delayed_manage_callback_.Reset(base::Bind(&GpuMemoryManager::Manage,
                                               AsWeakPtr()));
     MessageLoop::current()->PostDelayedTask(
         FROM_HERE,
         delayed_manage_callback_.callback(),
         base::TimeDelta::FromMilliseconds(kDelayedScheduleManageTimeoutMs));
   }
 }

 void GpuMemoryManager::TrackMemoryAllocatedChange(size_t old_size,
-                                                  size_t new_size)
-{
+                                                  size_t new_size) {
   if (new_size < old_size) {
     size_t delta = old_size - new_size;
     DCHECK(bytes_allocated_current_ >= delta);
     bytes_allocated_current_ -= delta;
   } else {
     size_t delta = new_size - old_size;
     bytes_allocated_current_ += delta;
     if (bytes_allocated_current_ > bytes_allocated_historical_max_) {
       bytes_allocated_historical_max_ = bytes_allocated_current_;
     }
   }
   if (new_size != old_size) {
     TRACE_COUNTER_ID1("GpuMemoryManager",
                       "GpuMemoryUsage",
                       this,
                       bytes_allocated_current_);
   }
 }

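TrackMemoryAllocatedChange above keeps a running total of bytes allocated in the GPU process and records the historical maximum. The snippet below is a minimal standalone sketch of the same delta-accounting pattern in plain C++; the AllocationTracker name and its fields are illustrative only and are not part of this patch.

// Illustrative sketch only: mirrors the old_size/new_size bookkeeping above.
#include <cassert>
#include <cstddef>
#include <iostream>

struct AllocationTracker {
  std::size_t current_bytes = 0;
  std::size_t historical_max_bytes = 0;

  // A shrink subtracts the delta; growth adds it and may raise the maximum.
  void TrackChange(std::size_t old_size, std::size_t new_size) {
    if (new_size < old_size) {
      std::size_t delta = old_size - new_size;
      assert(current_bytes >= delta);
      current_bytes -= delta;
    } else {
      current_bytes += new_size - old_size;
      if (current_bytes > historical_max_bytes)
        historical_max_bytes = current_bytes;
    }
  }
};

int main() {
  AllocationTracker tracker;
  tracker.TrackChange(0, 8 * 1024 * 1024);                // grow to 8 MB
  tracker.TrackChange(8 * 1024 * 1024, 2 * 1024 * 1024);  // shrink to 2 MB
  std::cout << tracker.current_bytes << " bytes current, "
            << tracker.historical_max_bytes << " bytes peak\n";
  return 0;
}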
+void GpuMemoryManager::AddTrackingGroup(
+    GpuMemoryTrackingGroup* tracking_group) {
+  tracking_groups_.insert(tracking_group);
+}
+
+void GpuMemoryManager::RemoveTrackingGroup(
+    GpuMemoryTrackingGroup* tracking_group) {
+  tracking_groups_.erase(tracking_group);
+}
+
+void GpuMemoryManager::GetVideoMemoryUsageStats(
+    content::GPUVideoMemoryUsageStats& video_memory_usage_stats) const {
+  // For each context group, assign its memory usage to its PID
+  video_memory_usage_stats.process_map.clear();
+  for (std::set<GpuMemoryTrackingGroup*>::const_iterator i =
+       tracking_groups_.begin(); i != tracking_groups_.end(); ++i) {
+    const GpuMemoryTrackingGroup* tracking_group = (*i);
+    video_memory_usage_stats.process_map[
+        tracking_group->GetPid()].video_memory += tracking_group->GetSize();
+  }
+
+  // Assign the total across all processes in the GPU process
+  video_memory_usage_stats.process_map[
+      base::GetCurrentProcId()].video_memory = bytes_allocated_current_;
+  video_memory_usage_stats.process_map[
+      base::GetCurrentProcId()].has_duplicates = true;
+}
+
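The new GetVideoMemoryUsageStats is what feeds the GPU memory column in the task manager: each tracking group's bytes are attributed to the PID of the process that owns it, and the GPU process entry then receives the overall total with has_duplicates set, since that total includes memory already attributed to other processes. Below is a rough sketch of how a consumer might read such a map; ProcessVideoMemoryStats is a simplified stand-in whose video_memory and has_duplicates names are mirrored from the code above, not the real content::GPUVideoMemoryUsageStats definition.

// Illustration only: simplified stand-in for the stats map populated above.
#include <cstddef>
#include <iostream>
#include <map>

struct ProcessVideoMemoryStats {
  std::size_t video_memory = 0;  // bytes attributed to this process
  bool has_duplicates = false;   // set on the GPU process total, which
                                 // double-counts memory attributed elsewhere
};

int main() {
  std::map<int, ProcessVideoMemoryStats> process_map;  // keyed by PID
  process_map[1234].video_memory += 32 * 1024 * 1024;  // one client's groups
  process_map[99].video_memory = 48 * 1024 * 1024;     // GPU process total
  process_map[99].has_duplicates = true;

  for (const auto& entry : process_map) {
    std::cout << "pid " << entry.first << ": "
              << entry.second.video_memory / (1024 * 1024) << " MB"
              << (entry.second.has_duplicates ? " (includes duplicates)" : "")
              << "\n";
  }
  return 0;
}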
 // The current Manage algorithm simply classifies contexts (stubs) into
 // "foreground", "background", or "hibernated" categories.
 // For each of these three categories, there are predefined memory allocation
 // limits and front/backbuffer states.
 //
 // Stubs may or may not have a surface, and the rules are different for each.
 //
 // The rules for categorizing contexts with a surface are:
 //  1. Foreground: All visible surfaces.
 //     * Must have both front and back buffer.
(...skipping 138 matching lines...)
       false);

   AssignMemoryAllocations(
       &stub_memory_stats_for_last_manage_,
       stubs_without_surface_hibernated,
       GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers),
       false);
 }

 #endif
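The comment block in the middle of this diff describes how Manage classifies contexts into foreground, background, and hibernated categories, each with predefined allocation limits and buffer requirements; only rule 1 (visible surfaces are foreground) survives in this excerpt. The sketch below shows just that shape in standalone C++; the names are illustrative, and the elided background/hibernated rules are replaced by an explicitly labelled placeholder.

// Illustration only, not part of the patch. Rule 1 ("visible => foreground")
// comes from the comment block above; the remaining rules are elided by the
// skipped lines, so a placeholder argument stands in for them here.
#include <iostream>

enum class ManagedCategory { kForeground, kBackground, kHibernated };

struct SurfaceStateLite {
  bool visible = false;
  double last_used_time = 0.0;  // stand-in for the real timestamp type
};

ManagedCategory ClassifySurfaceStub(const SurfaceStateLite& state,
                                    bool recently_used_placeholder) {
  if (state.visible)
    return ManagedCategory::kForeground;  // rule 1: all visible surfaces
  // Rules 2 and 3 are not shown in this excerpt; recency stands in here.
  return recently_used_placeholder ? ManagedCategory::kBackground
                                   : ManagedCategory::kHibernated;
}

int main() {
  SurfaceStateLite visible_surface;
  visible_surface.visible = true;
  std::cout << (ClassifySurfaceStub(visible_surface, false) ==
                ManagedCategory::kForeground)  // prints 1
            << "\n";
  return 0;
}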