Chromium Code Reviews

Index: content/common/gpu/gpu_memory_manager.cc

diff --git a/content/common/gpu/gpu_memory_manager.cc b/content/common/gpu/gpu_memory_manager.cc
index 163ebf0b3d6199bf943c0b806650c01a28d725d5..a31c892b40ec9a23ade0f74f0483247ea04a0843 100644
--- a/content/common/gpu/gpu_memory_manager.cc
+++ b/content/common/gpu/gpu_memory_manager.cc
@@ -16,10 +16,13 @@
 namespace {
 // These are predefined values (in bytes) for
-// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
-// used to check if it is 0 or non-0. In the future, these values will not
-// come from constants, but rather will be distributed dynamically.
+// GpuMemoryAllocation::gpuResourceSizeInBytes.
 enum {
+  kResourceSizeMinimumForVisibleTab = 64 * 1024 * 1024,
+  kResourceSizeSumOfAllVisibleTabs =
+      512 * 1024 * 1024 - kResourceSizeMinimumForVisibleTab,

mmocny (2012/04/18 20:43:11):
I subtract kResourceSizeMinimumForVisibleTab from ...

nduca (2012/04/18 23:16:28):
~shrug~ no strong opinions here. Put a rough expla...

+  kResourceSizeNonVisibleTab = 0,
+
   kResourceSizeNonHibernatedTab = 1,

nduca (2012/04/18 23:16:28):
are we still using these?

   kResourceSizeHibernatedTab = 0
 };
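
For scale, the constants above work out to a 64 MiB per-tab minimum and a 448 MiB pool for all visible tabs (512 MiB minus one per-tab minimum). The snippet below is only a standalone illustration of that arithmetic, not part of the patch:

// Standalone sketch (not part of this patch): the byte values implied by the
// new enum constants above.
#include <cstdio>

int main() {
  const unsigned minimum_for_visible_tab = 64 * 1024 * 1024;   // 67108864 bytes
  const unsigned sum_of_all_visible_tabs =
      512 * 1024 * 1024 - minimum_for_visible_tab;             // 469762048 bytes
  std::printf("minimum per visible tab: %u MiB\n",
              minimum_for_visible_tab / (1024 * 1024));        // 64 MiB
  std::printf("pool for all visible tabs: %u MiB\n",
              sum_of_all_visible_tabs / (1024 * 1024));        // 448 MiB
  return 0;
}
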
@@ -100,19 +103,12 @@ void GpuMemoryManager::ScheduleManage() {
 // 1. Find the most visible context-with-a-surface within each
 //    context-without-a-surface's share group, and inherit its visibilty.
 void GpuMemoryManager::Manage() {

nduca (2012/04/18 23:16:28):
I'm having a really hard time reading these allocat...

mmocny (2012/04/20 18:37:55):
On 2012/04/18 23:16:28, nduca wrote: ...
done.

-  // Set up three allocation values for the three possible stub states
-  const GpuMemoryAllocation all_buffers_allocation(
-      kResourceSizeNonHibernatedTab, true, true);
-  const GpuMemoryAllocation front_buffers_allocation(
-      kResourceSizeNonHibernatedTab, false, true);
-  const GpuMemoryAllocation no_buffers_allocation(
-      kResourceSizeHibernatedTab, false, false);
-
   manage_scheduled_ = false;
   // Create stub lists by separating out the two types received from client
   std::vector<GpuCommandBufferStubBase*> stubs_with_surface;
   std::vector<GpuCommandBufferStubBase*> stubs_without_surface;
+  size_t num_visible_stubs_with_surface = 0;
   {
     std::vector<GpuCommandBufferStubBase*> stubs;
     client_->AppendAllCommandBufferStubs(stubs);
@@ -120,10 +116,13 @@ void GpuMemoryManager::Manage() {
     for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
          it != stubs.end(); ++it) {
       GpuCommandBufferStubBase* stub = *it;
-      if (stub->has_surface_state())
+      if (stub->has_surface_state()) {
         stubs_with_surface.push_back(stub);
-      else
+        if (stub->surface_state().visible)
+          ++num_visible_stubs_with_surface;
+      } else {
         stubs_without_surface.push_back(stub);
+      }
     }
   }
@@ -135,6 +134,18 @@ void GpuMemoryManager::Manage() {
   DCHECK(std::unique(stubs_with_surface.begin(), stubs_with_surface.end()) ==
          stubs_with_surface.end());
+  // Set up allocation values for possible states for stubs with surfaces.

mmocny (2012/04/18 20:43:11):
We divide the total desired global limit by the nu...

+  size_t allocation_for_visible_tabs =
+      num_visible_stubs_with_surface == 0 ? 0 :
+      std::max(kResourceSizeSumOfAllVisibleTabs/num_visible_stubs_with_surface,
+               (size_t)kResourceSizeMinimumForVisibleTab);
+  const GpuMemoryAllocation all_buffers_allocation(
+      allocation_for_visible_tabs, true, true);
+  const GpuMemoryAllocation front_buffers_allocation(
+      kResourceSizeNonVisibleTab, false, true);
+  const GpuMemoryAllocation no_buffers_allocation(
+      kResourceSizeNonVisibleTab, false, false);
+
   // Separate stubs into memory allocation sets.
   std::vector<GpuCommandBufferStubBase*> all_buffers, front_buffers, no_buffers;
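
Concretely, the expression above splits the 448 MiB pool evenly across visible tabs, floors the result at the 64 MiB per-tab minimum, and hands out nothing when no tab is visible. The snippet below is a standalone illustration with sample numbers, not part of the patch; ComputeVisibleTabAllocation is a made-up helper name:

// Standalone sketch (not part of this patch) of the per-visible-tab budget
// computed above.
#include <algorithm>
#include <cstddef>
#include <cstdio>

static const size_t kMinimumForVisibleTab = 64 * 1024 * 1024;
static const size_t kSumOfAllVisibleTabs =
    512 * 1024 * 1024 - kMinimumForVisibleTab;  // 448 MiB

size_t ComputeVisibleTabAllocation(size_t num_visible_tabs) {
  if (num_visible_tabs == 0)
    return 0;
  // Split the pool evenly, but never drop below the per-tab minimum.
  return std::max(kSumOfAllVisibleTabs / num_visible_tabs,
                  kMinimumForVisibleTab);
}

int main() {
  // 1 visible tab -> 448 MiB, 2 -> 224 MiB, 7 -> 64 MiB, 16 -> 64 MiB (floor).
  const size_t counts[] = { 1, 2, 7, 16 };
  for (size_t i = 0; i < 4; ++i)
    std::printf("%zu visible tab(s): %zu MiB each\n", counts[i],
                ComputeVisibleTabAllocation(counts[i]) / (1024 * 1024));
  return 0;
}

With these constants the floor only starts to matter at eight or more visible tabs, since 448 MiB divided by 7 is exactly 64 MiB.
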
@@ -153,19 +164,23 @@ void GpuMemoryManager::Manage() {
     }
   }
+  // Set up allocation values for possible states for stubs without surfaces.

mmocny (2012/04/18 20:43:11):
Stubs without surfaces do not receive memory alloc...

+  const GpuMemoryAllocation non_hibernated_allocation(
+      kResourceSizeNonHibernatedTab, true, true);
+  const GpuMemoryAllocation hibernated_allocation(
+      kResourceSizeHibernatedTab, false, false);

nduca (2012/04/18 23:16:28):
I'm not loving that this causes us to give "1" to ...

mmocny (2012/04/20 18:37:55):
Did a big re-factoring here to make it all clearer.

+
   // Now, go through the stubs without surfaces and deduce visibility using the
   // visibility of stubs which are in the same context share group.
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
        stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
     GpuCommandBufferStubBase* stub = *it;
     DCHECK(!stub->has_surface_state());
-    if (IsInSameContextShareGroupAsAnyOf(stub, all_buffers)) {
-      stub->SetMemoryAllocation(all_buffers_allocation);
-    } else if (IsInSameContextShareGroupAsAnyOf(stub, front_buffers)) {
-      stub->SetMemoryAllocation(front_buffers_allocation);
-    } else {
-      stub->SetMemoryAllocation(no_buffers_allocation);
-    }
+    if (IsInSameContextShareGroupAsAnyOf(stub, all_buffers) ||
+        IsInSameContextShareGroupAsAnyOf(stub, front_buffers))
+      stub->SetMemoryAllocation(non_hibernated_allocation);
+    else
+      stub->SetMemoryAllocation(hibernated_allocation);
   }
 }
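
The rewritten branch collapses the three possible hand-me-down allocations for a surfaceless stub into two outcomes: non-hibernated when it shares a context group with some stub that keeps any buffers, hibernated otherwise. The sketch below is a standalone illustration of that decision, not part of the patch; ClassifySurfacelessStub, TokenResourceSize, and share_group_keeps_buffers are made-up names:

// Standalone sketch (not part of this patch). share_group_keeps_buffers stands
// in for the two IsInSameContextShareGroupAsAnyOf() checks against the
// all_buffers and front_buffers sets; the real code builds a
// GpuMemoryAllocation rather than returning a byte count.
#include <cstddef>

enum SurfacelessState { kNonHibernated, kHibernated };

// A surfaceless stub is non-hibernated iff some stub in its share group keeps
// any buffers.
SurfacelessState ClassifySurfacelessStub(bool share_group_keeps_buffers) {
  return share_group_keeps_buffers ? kNonHibernated : kHibernated;
}

size_t TokenResourceSize(SurfacelessState state) {
  // Mirrors kResourceSizeNonHibernatedTab (1) and kResourceSizeHibernatedTab
  // (0), which the pre-patch comment described as only being checked for
  // zero vs. non-zero.
  return state == kNonHibernated ? 1 : 0;
}
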