Chromium Code Reviews

Unified diff: content/common/gpu/gpu_memory_manager.cc

Issue 10083056: GpuMemoryManager suggests values for renderer Contents Texture Managers' preferred memory limit. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years, 8 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/gpu/gpu_memory_manager.h"

 #if defined(ENABLE_GPU)

 #include <algorithm>

 #include "base/bind.h"
 #include "base/message_loop.h"
 #include "content/common/gpu/gpu_command_buffer_stub.h"
 #include "content/common/gpu/gpu_memory_allocation.h"

 namespace {

 // These are predefined values (in bytes) for
-// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
-// used to check if it is 0 or non-0. In the future, these values will not
-// come from constants, but rather will be distributed dynamically.
+// GpuMemoryAllocation::gpuResourceSizeInBytes.
 enum {
+  kResourceSizeMinimumForVisibleTab = 64 * 1024 * 1024,
+  kResourceSizeSumOfAllVisibleTabs =
+      512 * 1024 * 1024 - kResourceSizeMinimumForVisibleTab,
mmocny 2012/04/18 20:43:11 I subtract kResourceSizeMinumumForVisibleTab from…
nduca 2012/04/18 23:16:28 ~shrug~ no strong opinions here. Put a rough expla…
+  kResourceSizeNonVisibleTab = 0,
+
   kResourceSizeNonHibernatedTab = 1,
nduca 2012/04/18 23:16:28 are we still using these?
   kResourceSizeHibernatedTab = 0
 };
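In concrete terms (an illustration, not code from the patch): the new constants give each visible tab a 64 MiB floor, and reserve a pool of 512 MiB minus one floor share, i.e. 448 MiB, which Manage() below splits across all visible tabs.

    // Illustration only: the byte values above expressed in MiB.
    #include <cstddef>

    const std::size_t kMinimumForVisibleTab = 64 * 1024 * 1024;   // 64 MiB floor
    const std::size_t kSumOfAllVisibleTabs =
        512 * 1024 * 1024 - kMinimumForVisibleTab;                // 448 MiB pool
    static_assert(kSumOfAllVisibleTabs / (1024 * 1024) == 448,
                  "visible tabs share a 448 MiB pool");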

 bool IsInSameContextShareGroupAsAnyOf(
     const GpuCommandBufferStubBase* stub,
     const std::vector<GpuCommandBufferStubBase*>& stubs) {
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
        stubs.begin(); it != stubs.end(); ++it) {
     if (stub->IsInSameContextShareGroup(**it))
       return true;
(...skipping 58 matching lines...)
 // The considerations for categorizing contexts without a surface are:
 // 1. These contexts do not track {visibility,last_used_time}, so cannot
 //    sort them directly.
 // 2. These contexts may be used by, and thus affect, other contexts, and so
 //    cannot be less visible than any affected context.
 // 3. Contexts belong to share groups within which resources can be shared.
 //
 // As such, the rule for categorizing contexts without a surface is:
 // 1. Find the most visible context-with-a-surface within each
 //    context-without-a-surface's share group, and inherit its visibility.
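A minimal sketch of that rule with toy types (my own simplification, not Chromium's classes; the real check goes through GpuCommandBufferStubBase::IsInSameContextShareGroup above):

    // Toy model: a context without a surface is treated as visible when any
    // visible surfaced context belongs to the same share group.
    #include <vector>

    struct ToyContext {
      int share_group_id;
      bool has_surface;
      bool visible;  // meaningful only when has_surface is true
    };

    // Mirrors rule 1 above: inherit the best visibility found in the group.
    bool InheritsVisibility(const ToyContext& surfaceless,
                            const std::vector<ToyContext>& contexts) {
      for (const ToyContext& c : contexts) {
        if (c.has_surface && c.visible &&
            c.share_group_id == surfaceless.share_group_id)
          return true;
      }
      return false;
    }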
 void GpuMemoryManager::Manage() {
nduca 2012/04/18 23:16:28 im having a really hard time reading these allocat…
mmocny 2012/04/20 18:37:55 done. On 2012/04/18 23:16:28, nduca wrote: …
-  // Set up three allocation values for the three possible stub states
-  const GpuMemoryAllocation all_buffers_allocation(
-      kResourceSizeNonHibernatedTab, true, true);
-  const GpuMemoryAllocation front_buffers_allocation(
-      kResourceSizeNonHibernatedTab, false, true);
-  const GpuMemoryAllocation no_buffers_allocation(
-      kResourceSizeHibernatedTab, false, false);
-
   manage_scheduled_ = false;

   // Create stub lists by separating out the two types received from client
   std::vector<GpuCommandBufferStubBase*> stubs_with_surface;
   std::vector<GpuCommandBufferStubBase*> stubs_without_surface;
+  size_t num_visible_stubs_with_surface = 0;
   {
     std::vector<GpuCommandBufferStubBase*> stubs;
     client_->AppendAllCommandBufferStubs(stubs);

     for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
          it != stubs.end(); ++it) {
       GpuCommandBufferStubBase* stub = *it;
-      if (stub->has_surface_state())
+      if (stub->has_surface_state()) {
         stubs_with_surface.push_back(stub);
-      else
+        if (stub->surface_state().visible)
+          ++num_visible_stubs_with_surface;
+      } else {
         stubs_without_surface.push_back(stub);
+      }
     }
   }

   // Sort stubs with surface into {visibility,last_used_time} order using
   // custom comparator
   std::sort(stubs_with_surface.begin(),
             stubs_with_surface.end(),
             StubWithSurfaceComparator());
   DCHECK(std::unique(stubs_with_surface.begin(), stubs_with_surface.end()) ==
          stubs_with_surface.end());
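StubWithSurfaceComparator itself is defined elsewhere in this file and is not part of this diff. A plausible shape, inferred from how the sorted list is consumed below (visible stubs first, then most recently used); the names and fields here are assumptions:

    // Assumed sketch of the ordering, not the patch's definition: visible
    // stubs sort ahead of non-visible ones; ties break toward the stub used
    // most recently.
    struct ToyStubState {
      bool visible;
      double last_used_time;  // larger means more recent
    };

    struct ToyStubComparator {
      bool operator()(const ToyStubState& a, const ToyStubState& b) const {
        if (a.visible != b.visible)
          return a.visible;                          // visible first
        return a.last_used_time > b.last_used_time;  // then most recent first
      }
    };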

+  // Set up allocation values for possible states for stubs with surfaces.
mmocny 2012/04/18 20:43:11 We divide the total desired global limit by the nu…
+  size_t allocation_for_visible_tabs =
+      num_visible_stubs_with_surface == 0 ? 0 :
+      std::max(kResourceSizeSumOfAllVisibleTabs/num_visible_stubs_with_surface,
+               (size_t)kResourceSizeMinimumForVisibleTab);
+  const GpuMemoryAllocation all_buffers_allocation(
+      allocation_for_visible_tabs, true, true);
+  const GpuMemoryAllocation front_buffers_allocation(
+      kResourceSizeNonVisibleTab, false, true);
+  const GpuMemoryAllocation no_buffers_allocation(
+      kResourceSizeNonVisibleTab, false, false);
+
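To make the division concrete (a standalone worked example using the patch's constants, not patch code): the 448 MiB pool is split evenly until the 64 MiB floor takes over at seven visible tabs; with nine or more, the floor pushes the summed allocations past 512 MiB.

    // Standalone sketch: per-visible-tab budget as computed by
    // allocation_for_visible_tabs above, for a range of tab counts.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t kMin = 64u * 1024 * 1024;           // minimum per visible tab
      const std::size_t kPool = 512u * 1024 * 1024 - kMin;  // shared 448 MiB pool
      for (std::size_t n = 1; n <= 9; ++n) {
        const std::size_t per_tab = std::max(kPool / n, kMin);
        std::printf("%zu visible tab(s): ~%zu MiB each, ~%zu MiB total\n",
                    n, per_tab / (1024 * 1024), n * per_tab / (1024 * 1024));
      }
      return 0;
    }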
   // Separate stubs into memory allocation sets.
   std::vector<GpuCommandBufferStubBase*> all_buffers, front_buffers, no_buffers;

   for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
     GpuCommandBufferStubBase* stub = stubs_with_surface[i];
     DCHECK(stub->has_surface_state());
     if (stub->surface_state().visible) {
       all_buffers.push_back(stub);
       stub->SetMemoryAllocation(all_buffers_allocation);
     } else if (i < max_surfaces_with_frontbuffer_soft_limit_) {
       front_buffers.push_back(stub);
       stub->SetMemoryAllocation(front_buffers_allocation);
     } else {
       no_buffers.push_back(stub);
       stub->SetMemoryAllocation(no_buffers_allocation);
     }
   }
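One subtlety worth noting, illustrated below with made-up data: i indexes the whole sorted list, so visible stubs consume front-buffer soft-limit slots even though they land in all_buffers.

    // Illustration only: bucketing a sorted stub list the way the loop above
    // does. The soft limit of 4 is a hypothetical value.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // Sorted order: visible stubs first, then non-visible by recency.
      const std::vector<bool> visible = {true, true, false, false, false, false};
      const std::size_t soft_limit = 4;  // stands in for max_surfaces_with_frontbuffer_soft_limit_

      for (std::size_t i = 0; i < visible.size(); ++i) {
        const char* bucket = visible[i] ? "all_buffers"
                             : i < soft_limit ? "front_buffers"
                                              : "no_buffers";
        std::printf("stub %zu -> %s\n", i, bucket);
      }
      return 0;
    }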

+  // Set up allocation values for possible states for stubs without surfaces.
mmocny 2012/04/18 20:43:11 Stubs without surfaces do not receive memory alloc…
+  const GpuMemoryAllocation non_hibernated_allocation(
+      kResourceSizeNonHibernatedTab, true, true);
+  const GpuMemoryAllocation hibernated_allocation(
+      kResourceSizeHibernatedTab, false, false);
nduca 2012/04/18 23:16:28 I'm not loving that this causes us to give "1" to…
mmocny 2012/04/20 18:37:55 Did a big re-factoring here to make it all clearer
+
   // Now, go through the stubs without surfaces and deduce visibility using the
   // visibility of stubs which are in the same context share group.
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
       stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
     GpuCommandBufferStubBase* stub = *it;
     DCHECK(!stub->has_surface_state());
-    if (IsInSameContextShareGroupAsAnyOf(stub, all_buffers)) {
-      stub->SetMemoryAllocation(all_buffers_allocation);
-    } else if (IsInSameContextShareGroupAsAnyOf(stub, front_buffers)) {
-      stub->SetMemoryAllocation(front_buffers_allocation);
-    } else {
-      stub->SetMemoryAllocation(no_buffers_allocation);
-    }
+    if (IsInSameContextShareGroupAsAnyOf(stub, all_buffers) ||
+        IsInSameContextShareGroupAsAnyOf(stub, front_buffers))
+      stub->SetMemoryAllocation(non_hibernated_allocation);
+    else
+      stub->SetMemoryAllocation(hibernated_allocation);
   }
 }

 #endif