Chromium Code Reviews

Side by Side Diff: content/common/gpu/gpu_memory_manager.cc

Issue 9289052: Adding GpuMemoryManager to track GpuCommandBufferStub visibility and last_used_time and dictate mem… (Closed) Base URL: http://git.chromium.org/chromium/src.git@master
Patch Set: Minor updates, working on tests (created 8 years, 10 months ago)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/common/gpu/gpu_memory_manager.h"
6
7 #if defined(ENABLE_GPU)
8
9 #include <set>
10 #include <algorithm>
11
12 #include "base/bind.h"
13 #include "base/message_loop.h"
14
15 ////////////////////////////////////////////////////////////////////////////////
16 // Constructors/Destructors
17
18 GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client)
19 : client_(client)
20 , manage_scheduled_(false) {
jonathan.backer 2012/01/31 18:13:58 nit: sorry, but I think this is the WK way, not the Chromium way.
mmocny 2012/01/31 18:54:57 Done.
21 }
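For reference, the Chromium convention the nit points at puts the comma at the end of each initializer line rather than at the start of the next one. A minimal sketch of the constructor after that change (not necessarily the exact later patch set):

  GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client)
      : client_(client),            // Chromium style: trailing comma here...
        manage_scheduled_(false) {  // ...rather than a leading comma on this line.
  }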
22
23 GpuMemoryManager::~GpuMemoryManager() {
24 }
25
26 ////////////////////////////////////////////////////////////////////////////////
27
28 void GpuMemoryManager::ScheduleManage() {
29 if (manage_scheduled_)
30 return;
31 manage_scheduled_ = true;
32 MessageLoop::current()->PostTask(
33 FROM_HERE, base::Bind(&GpuMemoryManager::Manage, base::Unretained(this)));
nduca 2012/01/31 06:53:47 Hmmm isn't this going to crash if the task is posted and then this object is destroyed?
34 }
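On the base::Unretained concern above: if the GpuMemoryManager is deleted while a Manage task is still queued, the bound pointer dangles. A common Chromium pattern for this is to bind through a base::WeakPtrFactory so the callback becomes a no-op once the manager is gone. The sketch below assumes a weak_ptr_factory_ member is added to the class (it is not part of this patch set, and may not be how later patch sets resolved the comment):

  // Assumed member, declared last in gpu_memory_manager.h
  // (requires base/memory/weak_ptr.h):
  //   base::WeakPtrFactory<GpuMemoryManager> weak_ptr_factory_;
  void GpuMemoryManager::ScheduleManage() {
    if (manage_scheduled_)
      return;
    manage_scheduled_ = true;
    MessageLoop::current()->PostTask(
        FROM_HERE,
        base::Bind(&GpuMemoryManager::Manage,
                   weak_ptr_factory_.GetWeakPtr()));
    // If |this| is deleted before the task runs, the WeakPtr is invalidated
    // and base::Bind drops the call instead of dereferencing freed memory.
  }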
35
36 void GpuMemoryManager::Manage() {
nduca 2012/01/31 06:53:47 I think you should take some of the comments from …
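The truncated comment above appears to ask for a higher-level description of Manage(). Inferred purely from the code in this function, such an overall comment could read roughly as follows (a sketch, not the wording that eventually landed):

  // Manage() overview (sketch):
  //  1. Fetch every command buffer stub from the client, split into stubs
  //     that own a surface and stubs that only reference surfaces.
  //  2. Sort the stubs with surfaces by {visibility, last_used_time}.
  //  3. Visible surfaces get a full allocation (front + back buffer);
  //     non-visible surfaces within the first
  //     kMaxSurfacesWithFrontBufferSoftLimit entries of the sorted order get
  //     a frontbuffer-only allocation; the rest get no buffers.
  //  4. Each surfaceless stub inherits the allocation of the most important
  //     bucket that any of its affected surfaces fell into.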
37 manage_scheduled_ = false;
38
39 // Ask client for command buffer stubs
40 std::vector<GpuMemoryManageableCommandBufferStub*> stubs_with_surface;
nduca 2012/01/31 06:53:47 this is where I was saying you should just get all …
mmocny 2012/01/31 18:54:57 Done.
41 std::vector<GpuMemoryManageableCommandBufferStub*> stubs_without_surface;
42 client_->AppendAllCommandBufferStubs(stubs_with_surface,
43 stubs_without_surface);
44
45 // Sort stubs with surface into {visibility,last_used_time} order using
46 // custom comparator
47 std::sort(stubs_with_surface.begin(), stubs_with_surface.end(),
48 StubComparator());
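StubComparator is defined elsewhere in this patch, so its exact form is not visible here. Purely for orientation, a comparator producing the {visibility, last_used_time} ordering the comment describes might look like the following sketch, which assumes surface_state() also exposes a last_used_time field:

  // Illustrative sketch only: visible stubs sort first; within each group,
  // more recently used stubs come before less recently used ones.
  struct StubComparator {
    bool operator()(GpuMemoryManageableCommandBufferStub* lhs,
                    GpuMemoryManageableCommandBufferStub* rhs) const {
      if (lhs->surface_state().visible != rhs->surface_state().visible)
        return lhs->surface_state().visible;
      return lhs->surface_state().last_used_time >
             rhs->surface_state().last_used_time;
    }
  };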
49
50 // Create allocations
nduca 2012/01/31 06:53:47 This comment isn't adding a lot of value. You're b…
mmocny 2012/01/31 18:54:57 Done.
51 GpuMemoryAllocation all_buffers_allocation;
52 all_buffers_allocation.gpuResourceSizeInBytes =
53 GpuMemoryAllocation::kResourceSizeForegroundTab;
54 all_buffers_allocation.hasFrontbuffer = true;
55 all_buffers_allocation.hasBackbuffer = true;
56
57 GpuMemoryAllocation front_buffers_allocation;
58 front_buffers_allocation.gpuResourceSizeInBytes =
59 GpuMemoryAllocation::kResourceSizeBackgroundTab;
60 front_buffers_allocation.hasFrontbuffer = true;
61 front_buffers_allocation.hasBackbuffer = false;
62
63 GpuMemoryAllocation no_buffers_allocation;
64 no_buffers_allocation.gpuResourceSizeInBytes =
65 GpuMemoryAllocation::kResourceSizeHibernatedTab;
66 no_buffers_allocation.hasFrontbuffer = false;
67 no_buffers_allocation.hasBackbuffer = false;
68
jonathan.backer 2012/01/31 18:13:58 AFAICT, these never change. Define them as constan
mmocny 2012/01/31 18:54:57 Done.
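On the point that these three allocations never change: one way to stop rebuilding them field by field on every Manage() call is a small file-local helper. The helper below is only a sketch (its name, the size_t parameter type, and whether the later patch sets took this route are assumptions):

  namespace {
  // Hypothetical helper: builds an allocation in one expression so each of
  // the three blocks above collapses to a single initialization, e.g.
  //   const GpuMemoryAllocation all_buffers_allocation = MakeAllocation(
  //       GpuMemoryAllocation::kResourceSizeForegroundTab, true, true);
  GpuMemoryAllocation MakeAllocation(size_t gpu_resource_size_in_bytes,
                                     bool has_frontbuffer,
                                     bool has_backbuffer) {
    GpuMemoryAllocation allocation;
    allocation.gpuResourceSizeInBytes = gpu_resource_size_in_bytes;
    allocation.hasFrontbuffer = has_frontbuffer;
    allocation.hasBackbuffer = has_backbuffer;
    return allocation;
  }
  }  // namespace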
69 // Separate stubs with surfaces into three sets and send memory allocation
nduca 2012/01/31 06:53:47 This should really be put up in the overall comment.
70 // 1. all_buffers: Front, Back, and RootLayerTiles [all visible surfaces]
71 // 2. front_buffers: Front only [based on #tab limit]
72 // 3. no_buffers: None [the rest]
73 static const size_t kMaxSurfacesWithFrontBufferSoftLimit = 8;
74 std::set<int32> all_buffers, front_buffers, no_buffers;
75
76 for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
77 GpuMemoryManageableCommandBufferStub* stub = stubs_with_surface[i];
78 if (stub->surface_state().visible) {
79 all_buffers.insert(stub->surface_state().surface_id);
80 stub->SendMemoryAllocation(all_buffers_allocation);
jonathan.backer 2012/01/31 18:13:58 Is this necessary given the code below? Aren't we …
mmocny 2012/01/31 18:54:57 The first loop is over stubs_with_surfaces, the second is over stubs_without_surfaces.
81 } else if (i < kMaxSurfacesWithFrontBufferSoftLimit) {
82 front_buffers.insert(stub->surface_state().surface_id);
83 stub->SendMemoryAllocation(front_buffers_allocation);
84 } else {
85 no_buffers.insert(stub->surface_state().surface_id);
86 stub->SendMemoryAllocation(no_buffers_allocation);
87 }
88 }
89
90 // Now, go through the stubs without surfaces and send memory allocations
91 // based on buckets we just divided. Because there may be multiple affected
92 // surfaces, use the state of the most "important" affected surface.
93 for (std::vector<GpuMemoryManageableCommandBufferStub*>::const_iterator it =
94 stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
95 GpuMemoryManageableCommandBufferStub* stub = *it;
96 if (std::find_first_of(all_buffers.begin(),
97 all_buffers.end(),
98 stub->affected_surface_ids().begin(),
99 stub->affected_surface_ids().end()) !=
100 all_buffers.end()) {
101 stub->SendMemoryAllocation(all_buffers_allocation);
102 } else if (std::find_first_of(front_buffers.begin(),
103 front_buffers.end(),
104 stub->affected_surface_ids().begin(),
105 stub->affected_surface_ids().end()) !=
106 front_buffers.end()) {
107 stub->SendMemoryAllocation(front_buffers_allocation);
108 } else if (std::find_first_of(no_buffers.begin(),
109 no_buffers.end(),
110 stub->affected_surface_ids().begin(),
111 stub->affected_surface_ids().end()) !=
112 no_buffers.end()) {
113 stub->SendMemoryAllocation(no_buffers_allocation);
114 } else {
115 // TODO(mmocny): Either (a) no affected surfaces, or
nduca 2012/01/31 06:53:47 Fix this so you put an if(affected_surface_ids().size()) check here.
mmocny 2012/01/31 18:54:57 Done.
116 // (b) your affected surfaces are incorrect
117 // (a) is fine, (b) is not
118 }
119 }
120 }
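The empty else branch above (cases (a)/(b) in the TODO) is where the suggested affected_surface_ids() check belongs: a stub with no affected surfaces is expected to match no bucket, while a stub whose surfaces matched nothing indicates stale bookkeeping. A sketch of how that distinction could be asserted, not necessarily the fix that landed:

    } else {
      // Reaching here is only legitimate when the stub has no affected
      // surfaces at all; otherwise the surface buckets and
      // affected_surface_ids() have drifted apart.
      DCHECK(stub->affected_surface_ids().empty());  // DCHECK from base/logging.h
    }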
121
122 ////////////////////////////////////////////////////////////////////////////////
123
124 #endif
