Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_memory_manager.cc

Issue 10083056: GpuMemoryManager suggests values for renderer Contents Texture Managers' preferred memory limit. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: On Mac 10.5, it fails to init the command buffer, and my callback code didn't guard against that. Created 8 years, 7 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/gpu/gpu_memory_manager.h"

 #if defined(ENABLE_GPU)

 #include <algorithm>

 #include "base/bind.h"
 #include "base/message_loop.h"
 #include "content/common/gpu/gpu_command_buffer_stub.h"
 #include "content/common/gpu/gpu_memory_allocation.h"

 namespace {

-// These are predefined values (in bytes) for
-// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
-// used to check if it is 0 or non-0. In the future, these values will not
-// come from constants, but rather will be distributed dynamically.
-enum {
-  kResourceSizeNonHibernatedTab = 1,
-  kResourceSizeHibernatedTab = 0
-};
-
 bool IsInSameContextShareGroupAsAnyOf(
     const GpuCommandBufferStubBase* stub,
     const std::vector<GpuCommandBufferStubBase*>& stubs) {
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
       stubs.begin(); it != stubs.end(); ++it) {
     if (stub->IsInSameContextShareGroup(**it))
       return true;
   }
   return false;
 }

+void AssignMemoryAllocations(std::vector<GpuCommandBufferStubBase*>& stubs,
+                             GpuMemoryAllocation allocation) {
+  for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
+      it != stubs.end(); ++it) {
+    (*it)->SetMemoryAllocation(allocation);
+  }
+}
+
 }

 GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client,
     size_t max_surfaces_with_frontbuffer_soft_limit)
     : client_(client),
       manage_scheduled_(false),
       max_surfaces_with_frontbuffer_soft_limit_(
           max_surfaces_with_frontbuffer_soft_limit),
       weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
 }

(...skipping 45 matching lines...)

 // 1. These contexts do not track {visibility,last_used_time}, so cannot
 //    sort them directly.
 // 2. These contexts may be used by, and thus affect, other contexts, and so
 //    cannot be less visible than any affected context.
 // 3. Contexts belong to share groups within which resources can be shared.
 //
 // As such, the rule for categorizing contexts without a surface is:
 //   1. Find the most visible context-with-a-surface within each
 //      context-without-a-surface's share group, and inherit its visibility.
 void GpuMemoryManager::Manage() {
-  // Set up three allocation values for the three possible stub states
-  const GpuMemoryAllocation all_buffers_allocation(
-      kResourceSizeNonHibernatedTab, true, true);
-  const GpuMemoryAllocation front_buffers_allocation(
-      kResourceSizeNonHibernatedTab, false, true);
-  const GpuMemoryAllocation no_buffers_allocation(
-      kResourceSizeHibernatedTab, false, false);
-
   manage_scheduled_ = false;

   // Create stub lists by separating out the two types received from client
   std::vector<GpuCommandBufferStubBase*> stubs_with_surface;
   std::vector<GpuCommandBufferStubBase*> stubs_without_surface;
   {
     std::vector<GpuCommandBufferStubBase*> stubs;
     client_->AppendAllCommandBufferStubs(stubs);

     for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
         it != stubs.end(); ++it) {
       GpuCommandBufferStubBase* stub = *it;
+      if (!stub->client_has_memory_allocation_changed_callback())
+        continue;
       if (stub->has_surface_state())
         stubs_with_surface.push_back(stub);
       else
         stubs_without_surface.push_back(stub);
     }
   }

   // Sort stubs with surface into {visibility,last_used_time} order using
   // custom comparator
   std::sort(stubs_with_surface.begin(),
             stubs_with_surface.end(),
             StubWithSurfaceComparator());
   DCHECK(std::unique(stubs_with_surface.begin(), stubs_with_surface.end()) ==
          stubs_with_surface.end());

   // Separate stubs into memory allocation sets.
-  std::vector<GpuCommandBufferStubBase*> all_buffers, front_buffers, no_buffers;
+  std::vector<GpuCommandBufferStubBase*> stubs_with_surface_foreground,
+                                         stubs_with_surface_background,
+                                         stubs_with_surface_hibernated,
+                                         stubs_without_surface_foreground,
+                                         stubs_without_surface_background,
+                                         stubs_without_surface_hibernated;

   for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
     GpuCommandBufferStubBase* stub = stubs_with_surface[i];
     DCHECK(stub->has_surface_state());
-    if (stub->surface_state().visible) {
-      all_buffers.push_back(stub);
-      stub->SetMemoryAllocation(all_buffers_allocation);
-    } else if (i < max_surfaces_with_frontbuffer_soft_limit_) {
-      front_buffers.push_back(stub);
-      stub->SetMemoryAllocation(front_buffers_allocation);
-    } else {
-      no_buffers.push_back(stub);
-      stub->SetMemoryAllocation(no_buffers_allocation);
-    }
+    if (stub->surface_state().visible)
+      stubs_with_surface_foreground.push_back(stub);
+    else if (i < max_surfaces_with_frontbuffer_soft_limit_)
+      stubs_with_surface_background.push_back(stub);
+    else
+      stubs_with_surface_hibernated.push_back(stub);
   }

-  // Now, go through the stubs without surfaces and deduce visibility using the
-  // visibility of stubs which are in the same context share group.
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
       stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
     GpuCommandBufferStubBase* stub = *it;
     DCHECK(!stub->has_surface_state());
-    if (IsInSameContextShareGroupAsAnyOf(stub, all_buffers)) {
-      stub->SetMemoryAllocation(all_buffers_allocation);
-    } else if (IsInSameContextShareGroupAsAnyOf(stub, front_buffers)) {
-      stub->SetMemoryAllocation(front_buffers_allocation);
-    } else {
-      stub->SetMemoryAllocation(no_buffers_allocation);
-    }
+
+    // Stubs without surfaces have deduced allocation state using the state
+    // of surface stubs which are in the same context share group.
+    if (IsInSameContextShareGroupAsAnyOf(stub, stubs_with_surface_foreground))
+      stubs_without_surface_foreground.push_back(stub);
+    else if (IsInSameContextShareGroupAsAnyOf(
+        stub, stubs_with_surface_background))
+      stubs_without_surface_background.push_back(stub);
+    else
+      stubs_without_surface_hibernated.push_back(stub);
   }
+
+  // Calculate memory allocation size in bytes given to each stub, by sharing
+  // global limit equally among those that need it.
+  size_t num_stubs_need_mem = stubs_with_surface_foreground.size() +
+                              stubs_without_surface_foreground.size() +
+                              stubs_without_surface_background.size();
+  size_t base_allocation_size = kMinimumAllocationForTab * num_stubs_need_mem;
+  size_t bonus_allocation = 0;
+  if (base_allocation_size < kMaximumAllocationForTabs &&
+      !stubs_with_surface_foreground.empty())
+    bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
+        stubs_with_surface_foreground.size();
+
+  // Now give out allocations to everyone.
+  AssignMemoryAllocations(stubs_with_surface_foreground,
+      GpuMemoryAllocation(kMinimumAllocationForTab + bonus_allocation,
+          GpuMemoryAllocation::kHasFrontbuffer |
+          GpuMemoryAllocation::kHasBackbuffer));
+
+  AssignMemoryAllocations(stubs_with_surface_background,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasFrontbuffer));
+
+  AssignMemoryAllocations(stubs_with_surface_hibernated,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_foreground,
+      GpuMemoryAllocation(kMinimumAllocationForTab,
+          GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_background,
+      GpuMemoryAllocation(kMinimumAllocationForTab,
+          GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_hibernated,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
 }

 #endif
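
The sizing policy the patch introduces is easy to miss inside the diff, so here is a minimal standalone sketch of it: every stub that still needs memory is promised kMinimumAllocationForTab, and whatever headroom remains under kMaximumAllocationForTabs is split evenly among the visible surfaces as a bonus. The real constants are defined outside this file, so the values below are placeholders, and ForegroundTabAllocation is a hypothetical helper written only for illustration, not part of the patch.

// Standalone sketch (not part of the patch): reproduces the base + bonus
// arithmetic from GpuMemoryManager::Manage() with placeholder limits.
#include <cstddef>
#include <iostream>

namespace {

// Placeholder values for illustration only; the patch takes the real
// kMinimumAllocationForTab / kMaximumAllocationForTabs from elsewhere.
const size_t kMinimumAllocationForTab = 64 * 1024 * 1024;    // 64 MB
const size_t kMaximumAllocationForTabs = 512 * 1024 * 1024;  // 512 MB

// Every stub that needs memory gets the minimum; visible (foreground)
// surfaces split the remaining headroom below the global cap as a bonus.
size_t ForegroundTabAllocation(size_t num_foreground_with_surface,
                               size_t num_foreground_without_surface,
                               size_t num_background_without_surface) {
  size_t num_stubs_need_mem = num_foreground_with_surface +
                              num_foreground_without_surface +
                              num_background_without_surface;
  size_t base_allocation_size = kMinimumAllocationForTab * num_stubs_need_mem;
  size_t bonus_allocation = 0;
  if (base_allocation_size < kMaximumAllocationForTabs &&
      num_foreground_with_surface > 0) {
    bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
                       num_foreground_with_surface;
  }
  return kMinimumAllocationForTab + bonus_allocation;
}

}  // namespace

int main() {
  // Example: 2 visible tabs, 1 foreground stub without a surface and
  // 1 background stub without a surface need memory.
  // base = 4 * 64 MB = 256 MB, bonus = (512 MB - 256 MB) / 2 = 128 MB,
  // so each visible tab is offered 64 MB + 128 MB = 192 MB.
  std::cout << ForegroundTabAllocation(2, 1, 1) << " bytes\n";
  return 0;
}

The design choice this encodes: background and hibernated surfaces keep at most a frontbuffer, stubs without surfaces inherit the bucket of the most visible surface in their context share group, and only visible surfaces compete for the bonus above the per-tab minimum.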
