Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_memory_manager.cc ('-' = removed by this revert, '+' = restored)

Issue 10267002: Revert 134428 - GpuMemoryManager suggests values for renderer Contents Texture Managers' preferred … (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Created 8 years, 7 months ago
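
For orientation before the diff: this patch backs out the byte-budget scheme from r134428 and returns Manage() to the earlier three-state model, where every stub is simply handed one of three GpuMemoryAllocation values and the byte field is only ever tested for zero versus non-zero. A minimal stand-in sketch of that model follows; the Allocation struct and its values are illustrative only, not the real content/common/gpu types.

// Hypothetical stand-in for GpuMemoryAllocation, used only to illustrate the
// three states this revert restores; not the actual Chromium class.
struct Allocation {
  unsigned size_in_bytes;  // currently only checked for 0 vs non-0
  bool has_frontbuffer;
  bool has_backbuffer;
};

enum {
  kResourceSizeNonHibernatedTab = 1,  // the "non-zero" marker
  kResourceSizeHibernatedTab = 0
};

// The three possible stub states after this revert:
const Allocation kAllBuffers = {kResourceSizeNonHibernatedTab, true, true};    // visible surface
const Allocation kFrontOnly  = {kResourceSizeNonHibernatedTab, true, false};   // backgrounded, within the frontbuffer soft limit
const Allocation kNoBuffers  = {kResourceSizeHibernatedTab, false, false};     // hibernated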
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/gpu/gpu_memory_manager.h"

 #if defined(ENABLE_GPU)

 #include <algorithm>

 #include "base/bind.h"
 #include "base/message_loop.h"
 #include "content/common/gpu/gpu_command_buffer_stub.h"
 #include "content/common/gpu/gpu_memory_allocation.h"

 namespace {

+// These are predefined values (in bytes) for
+// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
+// used to check if it is 0 or non-0. In the future, these values will not
+// come from constants, but rather will be distributed dynamically.
+enum {
+  kResourceSizeNonHibernatedTab = 1,
+  kResourceSizeHibernatedTab = 0
+};
+
 bool IsInSameContextShareGroupAsAnyOf(
     const GpuCommandBufferStubBase* stub,
     const std::vector<GpuCommandBufferStubBase*>& stubs) {
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
       stubs.begin(); it != stubs.end(); ++it) {
     if (stub->IsInSameContextShareGroup(**it))
       return true;
   }
   return false;
 }

-void AssignMemoryAllocations(std::vector<GpuCommandBufferStubBase*>& stubs,
-    GpuMemoryAllocation allocation) {
-  for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
-      it != stubs.end(); ++it) {
-    (*it)->SetMemoryAllocation(allocation);
-  }
-}
-
 }

 GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client,
     size_t max_surfaces_with_frontbuffer_soft_limit)
     : client_(client),
       manage_scheduled_(false),
       max_surfaces_with_frontbuffer_soft_limit_(
           max_surfaces_with_frontbuffer_soft_limit),
       weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
 }

(...skipping 45 matching lines...)

 // 1. These contexts do not track {visibility,last_used_time}, so cannot
 //    sort them directly.
 // 2. These contexts may be used by, and thus affect, other contexts, and so
 //    cannot be less visible than any affected context.
 // 3. Contexts belong to share groups within which resources can be shared.
 //
 // As such, the rule for categorizing contexts without a surface is:
 //   1. Find the most visible context-with-a-surface within each
 //      context-without-a-surface's share group, and inherit its visibilty.
 void GpuMemoryManager::Manage() {
+  // Set up three allocation values for the three possible stub states
+  const GpuMemoryAllocation all_buffers_allocation(
+      kResourceSizeNonHibernatedTab, true, true);
+  const GpuMemoryAllocation front_buffers_allocation(
+      kResourceSizeNonHibernatedTab, false, true);
+  const GpuMemoryAllocation no_buffers_allocation(
+      kResourceSizeHibernatedTab, false, false);
+
   manage_scheduled_ = false;

   // Create stub lists by separating out the two types received from client
   std::vector<GpuCommandBufferStubBase*> stubs_with_surface;
   std::vector<GpuCommandBufferStubBase*> stubs_without_surface;
   {
     std::vector<GpuCommandBufferStubBase*> stubs;
     client_->AppendAllCommandBufferStubs(stubs);

     for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
         it != stubs.end(); ++it) {
       GpuCommandBufferStubBase* stub = *it;
-      if (!stub->client_has_memory_allocation_changed_callback())
-        continue;
       if (stub->has_surface_state())
         stubs_with_surface.push_back(stub);
       else
         stubs_without_surface.push_back(stub);
     }
   }

   // Sort stubs with surface into {visibility,last_used_time} order using
   // custom comparator
   std::sort(stubs_with_surface.begin(),
             stubs_with_surface.end(),
             StubWithSurfaceComparator());
   DCHECK(std::unique(stubs_with_surface.begin(), stubs_with_surface.end()) ==
          stubs_with_surface.end());

   // Separate stubs into memory allocation sets.
-  std::vector<GpuCommandBufferStubBase*> stubs_with_surface_foreground,
-                                         stubs_with_surface_background,
-                                         stubs_with_surface_hibernated,
-                                         stubs_without_surface_foreground,
-                                         stubs_without_surface_background,
-                                         stubs_without_surface_hibernated;
+  std::vector<GpuCommandBufferStubBase*> all_buffers, front_buffers, no_buffers;

   for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
     GpuCommandBufferStubBase* stub = stubs_with_surface[i];
     DCHECK(stub->has_surface_state());
-    if (stub->surface_state().visible)
-      stubs_with_surface_foreground.push_back(stub);
-    else if (i < max_surfaces_with_frontbuffer_soft_limit_)
-      stubs_with_surface_background.push_back(stub);
-    else
-      stubs_with_surface_hibernated.push_back(stub);
+    if (stub->surface_state().visible) {
+      all_buffers.push_back(stub);
+      stub->SetMemoryAllocation(all_buffers_allocation);
+    } else if (i < max_surfaces_with_frontbuffer_soft_limit_) {
+      front_buffers.push_back(stub);
+      stub->SetMemoryAllocation(front_buffers_allocation);
+    } else {
+      no_buffers.push_back(stub);
+      stub->SetMemoryAllocation(no_buffers_allocation);
+    }
   }
+
+  // Now, go through the stubs without surfaces and deduce visibility using the
+  // visibility of stubs which are in the same context share group.
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
       stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
     GpuCommandBufferStubBase* stub = *it;
     DCHECK(!stub->has_surface_state());
-
-    // Stubs without surfaces have deduced allocation state using the state
-    // of surface stubs which are in the same context share group.
-    if (IsInSameContextShareGroupAsAnyOf(stub, stubs_with_surface_foreground))
-      stubs_without_surface_foreground.push_back(stub);
-    else if (IsInSameContextShareGroupAsAnyOf(
-        stub, stubs_with_surface_background))
-      stubs_without_surface_background.push_back(stub);
-    else
-      stubs_without_surface_hibernated.push_back(stub);
+    if (IsInSameContextShareGroupAsAnyOf(stub, all_buffers)) {
+      stub->SetMemoryAllocation(all_buffers_allocation);
+    } else if (IsInSameContextShareGroupAsAnyOf(stub, front_buffers)) {
+      stub->SetMemoryAllocation(front_buffers_allocation);
+    } else {
+      stub->SetMemoryAllocation(no_buffers_allocation);
+    }
   }
-
-  // Calculate memory allocation size in bytes given to each stub, by sharing
-  // global limit equally among those that need it.
-  size_t num_stubs_need_mem = stubs_with_surface_foreground.size() +
-                              stubs_without_surface_foreground.size() +
-                              stubs_without_surface_background.size();
-  size_t base_allocation_size = kMinimumAllocationForTab * num_stubs_need_mem;
-  size_t bonus_allocation = 0;
-  if (base_allocation_size < kMaximumAllocationForTabs &&
-      !stubs_with_surface_foreground.empty())
-    bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
-        stubs_with_surface_foreground.size();
-
-  // Now give out allocations to everyone.
-  AssignMemoryAllocations(stubs_with_surface_foreground,
-      GpuMemoryAllocation(kMinimumAllocationForTab + bonus_allocation,
-          GpuMemoryAllocation::kHasFrontbuffer |
-          GpuMemoryAllocation::kHasBackbuffer));
-
-  AssignMemoryAllocations(stubs_with_surface_background,
-      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasFrontbuffer));
-
-  AssignMemoryAllocations(stubs_with_surface_hibernated,
-      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
-
-  AssignMemoryAllocations(stubs_without_surface_foreground,
-      GpuMemoryAllocation(kMinimumAllocationForTab,
-          GpuMemoryAllocation::kHasNoBuffers));
-
-  AssignMemoryAllocations(stubs_without_surface_background,
-      GpuMemoryAllocation(kMinimumAllocationForTab,
-          GpuMemoryAllocation::kHasNoBuffers));
-
-  AssignMemoryAllocations(stubs_without_surface_hibernated,
-      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
 }

 #endif
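
For comparison, the arithmetic deleted above (the '-' block at the end of Manage()) shared a global byte budget equally among the stubs that needed memory and then split any remainder among the visible surfaced stubs. A self-contained sketch of that split follows; the 64 MB / 512 MB constants and the stub counts are assumed for illustration and are not taken from this CL.

#include <cstddef>
#include <iostream>

int main() {
  // Assumed stand-ins for kMinimumAllocationForTab / kMaximumAllocationForTabs.
  const size_t kMinimumAllocationForTab = 64 * 1024 * 1024;    // 64 MB, assumed
  const size_t kMaximumAllocationForTabs = 512 * 1024 * 1024;  // 512 MB, assumed

  // Assumed scenario: two visible tabs, one surfaceless foreground context,
  // one surfaceless background context.
  const size_t foreground_with_surface = 2;
  const size_t foreground_without_surface = 1;
  const size_t background_without_surface = 1;

  // Every stub in these three groups gets at least the per-tab minimum.
  const size_t num_stubs_need_mem = foreground_with_surface +
                                    foreground_without_surface +
                                    background_without_surface;
  const size_t base_allocation_size =
      kMinimumAllocationForTab * num_stubs_need_mem;

  // Whatever remains under the global cap is divided among visible tabs only.
  size_t bonus_allocation = 0;
  if (base_allocation_size < kMaximumAllocationForTabs &&
      foreground_with_surface > 0) {
    bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
                       foreground_with_surface;
  }

  // With the assumed numbers: base = 4 * 64 MB = 256 MB, remainder = 256 MB,
  // so each visible tab is offered 64 MB + 128 MB = 192 MB.
  std::cout << "visible tab allocation: "
            << (kMinimumAllocationForTab + bonus_allocation) << " bytes\n";
  return 0;
}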
