Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_memory_manager.cc

Issue 10854076: Add GPU memory tab to the task manager. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Clean try errors (remove semicolon, remove unneeded assert that unittests don't follow) Created 8 years, 4 months ago
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_memory_manager.h"

#if defined(ENABLE_GPU)

#include <algorithm>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop.h"
+#include "base/process_util.h"
#include "base/string_number_conversions.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_allocation.h"
#include "gpu/command_buffer/service/gpu_switches.h"

namespace {

const int kDelayedScheduleManageTimeoutMs = 67;

bool IsInSameContextShareGroupAsAnyOf(
(...skipping 60 matching lines...)
  } else {
#if defined(OS_ANDROID)
    bytes_available_gpu_memory_ = 64 * 1024 * 1024;
#else
    bytes_available_gpu_memory_ = 448 * 1024 * 1024;
#endif
  }
}

GpuMemoryManager::~GpuMemoryManager() {
+  DCHECK(tracking_groups_.empty());
}

bool GpuMemoryManager::StubWithSurfaceComparator::operator()(
    GpuCommandBufferStubBase* lhs,
    GpuCommandBufferStubBase* rhs) {
  DCHECK(lhs->has_surface_state() && rhs->has_surface_state());
  const GpuCommandBufferStubBase::SurfaceState& lhs_ss = lhs->surface_state();
  const GpuCommandBufferStubBase::SurfaceState& rhs_ss = rhs->surface_state();
  if (lhs_ss.visible)
    return !rhs_ss.visible || (lhs_ss.last_used_time > rhs_ss.last_used_time);
(...skipping 38 matching lines...)
    }
  }
  if (new_size != old_size) {
    TRACE_COUNTER_ID1("GpuMemoryManager",
                      "GpuMemoryUsage",
                      this,
                      bytes_allocated_current_);
  }
}

+void GpuMemoryManager::AddTrackingGroup(GpuMemoryTrackingGroup* tracking_group)
+{
greggman 2012/08/10 01:23:26 style: { goes at end of previous line
ccameron 2012/08/10 18:13:38 Done.
+  tracking_groups_.insert(tracking_group);
+}
+
+void GpuMemoryManager::RemoveTrackingGroup(
+  GpuMemoryTrackingGroup* tracking_group)
greggman 2012/08/10 01:23:26 style: indent 4
ccameron 2012/08/10 18:13:38 Done.
+{
greggman 2012/08/10 01:23:26 style: { goes at end of previous line
ccameron 2012/08/10 18:13:38 Done.
+  tracking_groups_.erase(tracking_group);
+}
+
+void GpuMemoryManager::GetVidmem(content::GPUVidmem& vidmem) const
+{
greggman 2012/08/10 01:23:26 style: { goes at end of previous line
ccameron 2012/08/10 18:13:38 Done.
+  // For each context group, assign its memory usage to its PID
+  vidmem.process_map.clear();
+  for (std::set<GpuMemoryTrackingGroup*>::const_iterator i =
+       tracking_groups_.begin(); i != tracking_groups_.end(); ++i) {
+    const GpuMemoryTrackingGroup* tracking_group = (*i);
+    vidmem.process_map[tracking_group->GetPid()].vidmem +=
+      tracking_group->GetSize();
greggman 2012/08/10 01:23:26 style: indent 4 from previous line
ccameron 2012/08/10 18:13:38 Done.
+  }
+
+  // Assign the total across all processes in the GPU process
+  vidmem.process_map[base::GetCurrentProcId()].vidmem =
+    bytes_allocated_current_;
greggman 2012/08/10 01:23:26 style: indent 4 from previous line
ccameron 2012/08/10 18:13:38 Done.
+  vidmem.process_map[base::GetCurrentProcId()].has_duplicates = true;
+}
+
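Since each of greggman's style notes above got a "Done." reply, the three new methods presumably ended up in a later patch set looking roughly like the sketch below (braces moved onto the previous line, continuation lines indented 4). This is an editor's reconstruction for illustration, not the actual follow-up patch set.

// Editor's reconstruction of the added methods with the requested style fixes
// applied; not the code that actually landed.
void GpuMemoryManager::AddTrackingGroup(
    GpuMemoryTrackingGroup* tracking_group) {
  tracking_groups_.insert(tracking_group);
}

void GpuMemoryManager::RemoveTrackingGroup(
    GpuMemoryTrackingGroup* tracking_group) {
  tracking_groups_.erase(tracking_group);
}

void GpuMemoryManager::GetVidmem(content::GPUVidmem& vidmem) const {
  // For each context group, attribute its memory usage to its PID.
  vidmem.process_map.clear();
  for (std::set<GpuMemoryTrackingGroup*>::const_iterator i =
       tracking_groups_.begin(); i != tracking_groups_.end(); ++i) {
    const GpuMemoryTrackingGroup* tracking_group = (*i);
    vidmem.process_map[tracking_group->GetPid()].vidmem +=
        tracking_group->GetSize();
  }

  // Report the total across all clients under the GPU process's own PID;
  // has_duplicates appears to flag that this total double-counts the
  // per-client entries above.
  vidmem.process_map[base::GetCurrentProcId()].vidmem =
      bytes_allocated_current_;
  vidmem.process_map[base::GetCurrentProcId()].has_duplicates = true;
}
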
// The current Manage algorithm simply classifies contexts (stubs) into
// "foreground", "background", or "hibernated" categories.
// For each of these three categories, there are predefined memory allocation
// limits and front/backbuffer states.
//
// Stubs may or may not have a surface, and the rules are different for each.
//
// The rules for categorizing contexts with a surface are:
//  1. Foreground: All visible surfaces.
//       * Must have both front and back buffer.
(...skipping 138 matching lines...)
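As a rough illustration of the classification the comment above describes (only rule 1 for surfaces is visible before the elided lines), a helper might look like the sketch below. StubCategory and CategorizeStubWithSurface are made-up names for this illustration and are not part of the patch.

// Editor's illustration only; the real categorization lives in the elided
// portion of Manage and its helpers.
enum StubCategory { FOREGROUND, BACKGROUND, HIBERNATED };

StubCategory CategorizeStubWithSurface(
    const GpuCommandBufferStubBase::SurfaceState& state) {
  if (state.visible)
    return FOREGROUND;  // Rule 1: all visible surfaces; keeps front and back buffer.
  // Rules 2 and 3 (background vs. hibernated) are in the lines elided above,
  // so this placeholder does not distinguish them.
  return BACKGROUND;
}
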
      false);

  AssignMemoryAllocations(
      &stub_memory_stats_for_last_manage_,
      stubs_without_surface_hibernated,
      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers),
      false);
}

#endif
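
Neither content::GPUVidmem nor GpuMemoryTrackingGroup is declared in this file. Judging only from the call sites in GetVidmem above, their shapes are presumably roughly as sketched below; any name or type not actually used above (ProcessEntry, the size_t and base::ProcessId choices) is a guess.

// Editor's sketch of the types GetVidmem relies on, inferred from its call
// sites only; not the actual declarations from this patch.
#include <cstddef>
#include <map>

#include "base/process_util.h"  // base::ProcessId, base::GetCurrentProcId().

namespace content {

struct GPUVidmem {
  struct ProcessEntry {   // "ProcessEntry" is a guessed name.
    size_t vidmem;        // Bytes attributed to this process.
    bool has_duplicates;  // Set on the GPU process's own entry, whose total
                          // overlaps the per-client entries.
  };
  // Keyed by OS process id; operator[] value-initializes new entries to zero.
  std::map<base::ProcessId, ProcessEntry> process_map;
};

}  // namespace content

// Interface assumed by GetVidmem; return types are guesses.
class GpuMemoryTrackingGroup {
 public:
  base::ProcessId GetPid() const;  // Client process that owns this group.
  size_t GetSize() const;          // Bytes currently allocated by the group.
};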
