Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_memory_manager.cc

Issue 10083056: GpuMemoryManager suggests values for renderer Contents Texture Managers' preferred memory limit. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Making allocations clearer to read, and changing the distribution to include canvas/webgl etc. Created 8 years, 8 months ago
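In outline, the patch replaces the old 0/1 placeholder allocation values with byte budgets derived from two constants, and splits whatever budget remains after the per-tab minimum as a bonus among the foreground tabs that own surfaces. The standalone sketch below (the helper name and main() are illustrative; only the two constants come from the patch) mirrors the arithmetic the new Manage() performs:

// Sketch of the allocation arithmetic introduced by this patch. The two
// constants are copied from the patch; everything else is illustrative.
#include <cstddef>
#include <cstdio>

const size_t kMinimumAllocationForTab = 64 * 1024 * 1024;
const size_t kMaximumAllocationForTabs =
    512 * 1024 * 1024 - kMinimumAllocationForTab;  // 448 MB soft limit.

// Byte budget suggested to a visible (foreground) stub that owns a surface.
size_t ForegroundSurfaceAllocation(size_t num_foreground_stubs,
                                   size_t num_foreground_surface_stubs) {
  size_t base = kMinimumAllocationForTab * num_foreground_stubs;
  size_t bonus = 0;
  if (base < kMaximumAllocationForTabs && num_foreground_surface_stubs > 0)
    bonus = (kMaximumAllocationForTabs - base) / num_foreground_surface_stubs;
  return base + bonus;
}

int main() {
  // Two foreground tabs, both with surfaces: base = 2 * 64 MB = 128 MB,
  // bonus = (448 MB - 128 MB) / 2 = 160 MB, so each is offered 288 MB.
  printf("%zu MB\n", ForegroundSurfaceAllocation(2, 2) / (1024 * 1024));
  return 0;
}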
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/gpu/gpu_memory_manager.h"

 #if defined(ENABLE_GPU)

 #include <algorithm>

 #include "base/bind.h"
 #include "base/message_loop.h"
 #include "content/common/gpu/gpu_command_buffer_stub.h"
 #include "content/common/gpu/gpu_memory_allocation.h"

 namespace {

 // These are predefined values (in bytes) for
-// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
-// used to check if it is 0 or non-0. In the future, these values will not
-// come from constants, but rather will be distributed dynamically.
+// GpuMemoryAllocation::gpuResourceSizeInBytes.
+// The maximum allocation for all tabs is a soft limit that can be exceeded
+// during the time it takes for renderers to respect new allocations,
+// including when switching tabs or opening a new window.
+// To alleviate some pressure, we decrease our desired limit by "one tab's
+// worth" of memory.
 enum {
-  kResourceSizeNonHibernatedTab = 1,
-  kResourceSizeHibernatedTab = 0
+  kMinimumAllocationForTab = 64 * 1024 * 1024,
+  kMaximumAllocationForTabs = 512 * 1024 * 1024 - kMinimumAllocationForTab,
 };

 bool IsInSameContextShareGroupAsAnyOf(
     const GpuCommandBufferStubBase* stub,
     const std::vector<GpuCommandBufferStubBase*>& stubs) {
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
       stubs.begin(); it != stubs.end(); ++it) {
     if (stub->IsInSameContextShareGroup(**it))
       return true;
   }
   return false;
 }

+void AssignMemoryAllocations(std::vector<GpuCommandBufferStubBase*>& stubs,
+                             GpuMemoryAllocation allocation) {
+  for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
+      it != stubs.end(); ++it) {
+    (*it)->SetMemoryAllocation(allocation);
+  }
+}
+
 }

 GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client,
     size_t max_surfaces_with_frontbuffer_soft_limit)
     : client_(client),
       manage_scheduled_(false),
       max_surfaces_with_frontbuffer_soft_limit_(
           max_surfaces_with_frontbuffer_soft_limit),
       weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
 }
(...skipping 45 matching lines...)
 // 1. These contexts do not track {visibility,last_used_time}, so cannot
 //    sort them directly.
 // 2. These contexts may be used by, and thus affect, other contexts, and so
 //    cannot be less visible than any affected context.
 // 3. Contexts belong to share groups within which resources can be shared.
 //
 // As such, the rule for categorizing contexts without a surface is:
 // 1. Find the most visible context-with-a-surface within each
 //    context-without-a-surface's share group, and inherit its visibility.
 void GpuMemoryManager::Manage() {
-  // Set up three allocation values for the three possible stub states
-  const GpuMemoryAllocation all_buffers_allocation(
-      kResourceSizeNonHibernatedTab, true, true);
-  const GpuMemoryAllocation front_buffers_allocation(
-      kResourceSizeNonHibernatedTab, false, true);
-  const GpuMemoryAllocation no_buffers_allocation(
-      kResourceSizeHibernatedTab, false, false);
-
   manage_scheduled_ = false;

   // Create stub lists by separating out the two types received from client
   std::vector<GpuCommandBufferStubBase*> stubs_with_surface;
   std::vector<GpuCommandBufferStubBase*> stubs_without_surface;
   {
     std::vector<GpuCommandBufferStubBase*> stubs;
     client_->AppendAllCommandBufferStubs(stubs);

     for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
         it != stubs.end(); ++it) {
       GpuCommandBufferStubBase* stub = *it;
       if (stub->has_surface_state())
         stubs_with_surface.push_back(stub);
       else
         stubs_without_surface.push_back(stub);
     }
   }

   // Sort stubs with surface into {visibility,last_used_time} order using
   // custom comparator
   std::sort(stubs_with_surface.begin(),
             stubs_with_surface.end(),
             StubWithSurfaceComparator());
   DCHECK(std::unique(stubs_with_surface.begin(), stubs_with_surface.end()) ==
       stubs_with_surface.end());
   // Separate stubs into memory allocation sets.
-  std::vector<GpuCommandBufferStubBase*> all_buffers, front_buffers, no_buffers;
+  std::vector<GpuCommandBufferStubBase*> stubs_with_surface_foreground,
+      stubs_with_surface_background,
+      stubs_with_surface_hibernated,
+      stubs_without_surface_foreground,
+      stubs_without_surface_background,
+      stubs_without_surface_hibernated;

   for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
     GpuCommandBufferStubBase* stub = stubs_with_surface[i];
     DCHECK(stub->has_surface_state());
-    if (stub->surface_state().visible) {
-      all_buffers.push_back(stub);
-      stub->SetMemoryAllocation(all_buffers_allocation);
-    } else if (i < max_surfaces_with_frontbuffer_soft_limit_) {
-      front_buffers.push_back(stub);
-      stub->SetMemoryAllocation(front_buffers_allocation);
-    } else {
-      no_buffers.push_back(stub);
-      stub->SetMemoryAllocation(no_buffers_allocation);
-    }
+    if (stub->surface_state().visible)
+      stubs_with_surface_foreground.push_back(stub);
+    else if (i < max_surfaces_with_frontbuffer_soft_limit_)
+      stubs_with_surface_background.push_back(stub);
+    else
+      stubs_with_surface_hibernated.push_back(stub);
   }
-
-  // Now, go through the stubs without surfaces and deduce visibility using the
-  // visibility of stubs which are in the same context share group.
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
       stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
     GpuCommandBufferStubBase* stub = *it;
     DCHECK(!stub->has_surface_state());
-    if (IsInSameContextShareGroupAsAnyOf(stub, all_buffers)) {
-      stub->SetMemoryAllocation(all_buffers_allocation);
-    } else if (IsInSameContextShareGroupAsAnyOf(stub, front_buffers)) {
-      stub->SetMemoryAllocation(front_buffers_allocation);
-    } else {
-      stub->SetMemoryAllocation(no_buffers_allocation);
-    }
+
+    // Stubs without surfaces have their allocation state deduced from the
+    // state of surface stubs in the same context share group.
+    if (IsInSameContextShareGroupAsAnyOf(stub, stubs_with_surface_foreground))
+      stubs_without_surface_foreground.push_back(stub);
+    else if (IsInSameContextShareGroupAsAnyOf(
+        stub, stubs_with_surface_background))
+      stubs_without_surface_background.push_back(stub);
+    else
+      stubs_without_surface_hibernated.push_back(stub);
   }
+
+  // Calculate the memory allocation (in bytes) given to each stub by sharing
+  // the global limit equally among those that need it.
+  size_t num_foreground_stubs = stubs_with_surface_foreground.size() +
+      stubs_without_surface_foreground.size();
+  size_t base_allocation_size =
+      kMinimumAllocationForTab * num_foreground_stubs;
+  size_t bonus_allocation = 0;
+  if (base_allocation_size < kMaximumAllocationForTabs &&
+      !stubs_with_surface_foreground.empty())
+    bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
+        stubs_with_surface_foreground.size();
+
+  // Now give out allocations to everyone.
+  AssignMemoryAllocations(stubs_with_surface_foreground,
+      GpuMemoryAllocation(base_allocation_size + bonus_allocation,
+          GpuMemoryAllocation::kHasFrontbuffer |
+          GpuMemoryAllocation::kHasBackbuffer));
+
+  AssignMemoryAllocations(stubs_with_surface_background,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasFrontbuffer));
+
+  AssignMemoryAllocations(stubs_with_surface_hibernated,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_foreground,
+      GpuMemoryAllocation(base_allocation_size,
+          GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_background,
+      GpuMemoryAllocation(1, GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_hibernated,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
 }

 #endif
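Read directly from the new Manage(), the six stub buckets and what each is offered. The summary struct below is illustrative only; the byte values and buffer flags come from the patch, where "base" and "bonus" stand for base_allocation_size and bonus_allocation:

// Illustrative summary of the allocation policy in the new Manage().
struct BucketPolicy {
  const char* bucket;  // Which list Manage() sorts the stub into.
  const char* bytes;   // Suggested GPU memory budget.
  bool has_frontbuffer;
  bool has_backbuffer;
};

const BucketPolicy kPolicy[] = {
  { "stubs_with_surface_foreground",    "base + bonus", true,  true  },
  { "stubs_with_surface_background",    "0",            true,  false },
  { "stubs_with_surface_hibernated",    "0",            false, false },
  { "stubs_without_surface_foreground", "base",         false, false },
  // 1 byte rather than 0: per the old comment, the value was only checked
  // for being zero or non-zero, so this keeps the stub reading as live.
  { "stubs_without_surface_background", "1",            false, false },
  { "stubs_without_surface_hibernated", "0",            false, false },
};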
