Chromium Code Reviews

Diff: content/common/gpu/gpu_memory_manager.cc

Issue 11187010: Rename SurfaceState to MemoryManagerState (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fix trybot warnings Created 8 years, 2 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/gpu/gpu_memory_manager.h"

 #if defined(ENABLE_GPU)

 #include <algorithm>

(...skipping 118 matching lines...)
       GetDefaultAvailableGpuMemory());

   // And never go above 1GB
   bytes_available_gpu_memory_ = std::min(bytes_available_gpu_memory_,
       static_cast<size_t>(1024*1024*1024));
 }

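A note on the clamp above: the budget computed in the skipped lines (which appear to floor the value at GetDefaultAvailableGpuMemory()) is capped so it never exceeds 1GB. A minimal standalone sketch of that floor-then-ceiling pattern; the constant values are hypothetical and only illustrate the idea:

#include <algorithm>
#include <cstddef>

// Hypothetical limits, for illustration only.
const size_t kDefaultAvailableGpuMemory = 64 * 1024 * 1024;    // assumed floor
const size_t kMaximumAvailableGpuMemory = 1024 * 1024 * 1024;  // 1GB ceiling

size_t ClampAvailableGpuMemory(size_t computed_bytes) {
  // Never go below the default budget...
  size_t bytes = std::max(computed_bytes, kDefaultAvailableGpuMemory);
  // ...and never go above 1GB.
  return std::min(bytes, kMaximumAvailableGpuMemory);
}
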
 bool GpuMemoryManager::StubWithSurfaceComparator::operator()(
     GpuCommandBufferStubBase* lhs,
     GpuCommandBufferStubBase* rhs) {
-  DCHECK(lhs->has_surface_state() && rhs->has_surface_state());
-  const GpuCommandBufferStubBase::SurfaceState& lhs_ss = lhs->surface_state();
-  const GpuCommandBufferStubBase::SurfaceState& rhs_ss = rhs->surface_state();
-  if (lhs_ss.visible)
-    return !rhs_ss.visible || (lhs_ss.last_used_time > rhs_ss.last_used_time);
+  DCHECK(lhs->memory_manager_state().has_surface &&
+         rhs->memory_manager_state().has_surface);
+  const GpuCommandBufferStubBase::MemoryManagerState& lhs_mms =
+      lhs->memory_manager_state();
+  const GpuCommandBufferStubBase::MemoryManagerState& rhs_mms =
+      rhs->memory_manager_state();
+  if (lhs_mms.visible)
+    return !rhs_mms.visible || (lhs_mms.last_used_time >
+                                rhs_mms.last_used_time);
   else
-    return !rhs_ss.visible && (lhs_ss.last_used_time > rhs_ss.last_used_time);
+    return !rhs_mms.visible && (lhs_mms.last_used_time >
+                                rhs_mms.last_used_time);
 };

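The comparator above (unchanged in behavior by this rename) orders surface-backed stubs so that visible stubs come first and, within the visible and non-visible groups, more recently used stubs come first. A minimal sketch of the same ordering on a simplified stand-in type; StubInfo and its fields are hypothetical, not the real GpuCommandBufferStubBase interface:

#include <algorithm>
#include <vector>

// Hypothetical stand-in for the per-stub state the comparator reads.
struct StubInfo {
  bool visible;
  double last_used_time;  // larger means more recently used
};

// Visible stubs sort ahead of non-visible ones; ties broken by recency.
bool MoreImportant(const StubInfo& lhs, const StubInfo& rhs) {
  if (lhs.visible != rhs.visible)
    return lhs.visible;
  return lhs.last_used_time > rhs.last_used_time;
}

// Example: sort a snapshot of stub states into management order, mirroring
// the std::sort call later in Manage().
void SortForManagement(std::vector<StubInfo>* stubs) {
  std::sort(stubs->begin(), stubs->end(), MoreImportant);
}
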
 void GpuMemoryManager::ScheduleManage(bool immediate) {
   if (manage_immediate_scheduled_)
     return;
   if (immediate) {
     MessageLoop::current()->PostTask(
         FROM_HERE,
         base::Bind(&GpuMemoryManager::Manage, AsWeakPtr()));
     manage_immediate_scheduled_ = true;
(...skipping 104 matching lines...)
   // Create stub lists by separating out the two types received from client
   std::vector<GpuCommandBufferStubBase*> stubs_with_surface;
   std::vector<GpuCommandBufferStubBase*> stubs_without_surface;
   {
     std::vector<GpuCommandBufferStubBase*> stubs;
     client_->AppendAllCommandBufferStubs(stubs);

     for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
          it != stubs.end(); ++it) {
       GpuCommandBufferStubBase* stub = *it;
-      if (!stub->client_has_memory_allocation_changed_callback())
+      if (!stub->memory_manager_state().
+          client_has_memory_allocation_changed_callback)
         continue;
-      if (stub->has_surface_state())
+      if (stub->memory_manager_state().has_surface)
         stubs_with_surface.push_back(stub);
       else
         stubs_without_surface.push_back(stub);
     }
   }

   // Sort stubs with surface into {visibility,last_used_time} order using
   // custom comparator
   std::sort(stubs_with_surface.begin(),
             stubs_with_surface.end(),
             StubWithSurfaceComparator());
   DCHECK(std::unique(stubs_with_surface.begin(), stubs_with_surface.end()) ==
          stubs_with_surface.end());

   // Separate stubs into memory allocation sets.
   std::vector<GpuCommandBufferStubBase*> stubs_with_surface_foreground,
                                          stubs_with_surface_background,
                                          stubs_with_surface_hibernated,
                                          stubs_without_surface_foreground,
                                          stubs_without_surface_background,
                                          stubs_without_surface_hibernated;

   for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
     GpuCommandBufferStubBase* stub = stubs_with_surface[i];
-    DCHECK(stub->has_surface_state());
-    if (stub->surface_state().visible)
+    DCHECK(stub->memory_manager_state().has_surface);
+    if (stub->memory_manager_state().visible)
       stubs_with_surface_foreground.push_back(stub);
     else if (i < max_surfaces_with_frontbuffer_soft_limit_)
       stubs_with_surface_background.push_back(stub);
     else
       stubs_with_surface_hibernated.push_back(stub);
   }
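
After sorting, the loop above splits the surface-backed stubs into three buckets: visible stubs go to foreground, the earliest non-visible entries of the sorted list (up to max_surfaces_with_frontbuffer_soft_limit_) stay background, and everything else is hibernated. A minimal sketch of the same bucketing, using the same hypothetical StubInfo type as the comparator sketch above:

#include <cstddef>
#include <vector>

// Same hypothetical stand-in as in the comparator sketch.
struct StubInfo {
  bool visible;
  double last_used_time;
};

void ClassifyStubsWithSurface(const std::vector<StubInfo>& sorted_stubs,
                              size_t frontbuffer_soft_limit,
                              std::vector<StubInfo>* foreground,
                              std::vector<StubInfo>* background,
                              std::vector<StubInfo>* hibernated) {
  for (size_t i = 0; i < sorted_stubs.size(); ++i) {
    if (sorted_stubs[i].visible)
      foreground->push_back(sorted_stubs[i]);   // visible stubs
    else if (i < frontbuffer_soft_limit)
      background->push_back(sorted_stubs[i]);   // within the soft limit
    else
      hibernated->push_back(sorted_stubs[i]);   // beyond the soft limit
  }
}
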
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
       stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
     GpuCommandBufferStubBase* stub = *it;
-    DCHECK(!stub->has_surface_state());
+    DCHECK(!stub->memory_manager_state().has_surface);

     // Stubs without surfaces have deduced allocation state using the state
     // of surface stubs which are in the same context share group.
     if (IsInSameContextShareGroupAsAnyOf(stub, stubs_with_surface_foreground))
       stubs_without_surface_foreground.push_back(stub);
     else if (IsInSameContextShareGroupAsAnyOf(
         stub, stubs_with_surface_background))
       stubs_without_surface_background.push_back(stub);
     else
       stubs_without_surface_hibernated.push_back(stub);
(...skipping 75 matching lines...)
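
Offscreen stubs inherit their bucket from surface-backed stubs in the same context share group, as the comment in the loop above says. A minimal sketch of that deduction, assuming a hypothetical integer share_group_id in place of the real IsInSameContextShareGroupAsAnyOf() check over GpuCommandBufferStubBase pointers:

#include <set>

enum StubBucket { FOREGROUND, BACKGROUND, HIBERNATED };

// A stub without a surface is foreground if any foreground surface stub
// shares its group, background if any background surface stub does, and
// hibernated otherwise.
StubBucket DeduceOffscreenBucket(int share_group_id,
                                 const std::set<int>& foreground_groups,
                                 const std::set<int>& background_groups) {
  if (foreground_groups.count(share_group_id))
    return FOREGROUND;
  if (background_groups.count(share_group_id))
    return BACKGROUND;
  return HIBERNATED;
}
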
       false);

   AssignMemoryAllocations(
       &stub_memory_stats_for_last_manage_,
       stubs_without_surface_hibernated,
       GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers),
       false);
 }

 #endif
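
The tail of Manage() shown above hands each bucket its allocation; hibernated stubs without surfaces end up with a zero-byte, no-buffers allocation. A minimal sketch of the shape of that last step, with Allocation and StubRecord as simplified hypothetical stand-ins for GpuMemoryAllocation and the stub interface (the real AssignMemoryAllocations also records per-stub statistics):

#include <cstddef>
#include <vector>

// Simplified stand-in for GpuMemoryAllocation.
struct Allocation {
  size_t gpu_resource_size_in_bytes;
  bool has_buffers;
};

// Hypothetical stub record that just stores its current allocation.
struct StubRecord {
  Allocation allocation;
};

// Give every stub in a bucket the same allocation, e.g. {0, false} for
// the hibernated buckets.
void AssignAllocation(std::vector<StubRecord*>* bucket,
                      const Allocation& alloc) {
  for (size_t i = 0; i < bucket->size(); ++i)
    (*bucket)[i]->allocation = alloc;
}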