Chromium Code Reviews

Unified Diff: gpu/command_buffer/client/fenced_allocator.cc

Issue 116863003: gpu: Reuse transfer buffers more aggressively (Closed) Base URL: http://git.chromium.org/chromium/src.git@master
Patch Set: [WIP] Review comments follow-up Created 6 years, 11 months ago
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 // This file contains the implementation of the FencedAllocator class.

 #include "gpu/command_buffer/client/fenced_allocator.h"

 #include <algorithm>

(...skipping 24 matching lines...)

 FencedAllocator::FencedAllocator(unsigned int size,
                                  CommandBufferHelper *helper)
     : helper_(helper),
       bytes_in_use_(0) {
   Block block = { FREE, 0, RoundDown(size), kUnusedToken };
   blocks_.push_back(block);
 }

 FencedAllocator::~FencedAllocator() {
-  // Free blocks pending tokens.
+  // Free blocks pending tokens and serials.
   for (unsigned int i = 0; i < blocks_.size(); ++i) {
-    if (blocks_[i].state == FREE_PENDING_TOKEN) {
-      i = WaitForTokenAndFreeBlock(i);
+    switch (blocks_[i].state) {
+      case FREE_PENDING_TOKEN:
+        i = WaitForTokenAndFreeBlock(i);
+        break;
+      case FREE_PENDING_SERIAL:
+        blocks_[i].state = FREE;
+        CollapseFreeBlock(i);
reveman 2014/01/16 17:24:49: I don't think it makes sense free this immediately …
piman 2014/01/16 21:22:50: That is actually not needed to ensure things on th…
jadahl 2014/01/17 08:50:25: We could just helper_->Finish() before the loop to…
reveman 2014/01/17 16:56:47: If we're adding an async-token, then some command…
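For reference, a minimal sketch of jadahl's Finish()-before-the-loop alternative, assuming helper_->Finish() also retires pending serials (the assumption reveman questions above); illustration only, not part of this patch:

  FencedAllocator::~FencedAllocator() {
    helper_->Finish();  // drain outstanding commands once, up front
    FreeUnused();       // reclaims both token- and serial-pending blocks
    // (consistency DCHECKs as in the current destructor)
  }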
+        break;
+      default:
+        break;
     }
   }
   // These checks are not valid if the service has crashed or lost the context.
   // DCHECK_EQ(blocks_.size(), 1u);
   // DCHECK_EQ(blocks_[0].state, FREE);
 }

 // Looks for a non-allocated block that is big enough. Search in the FREE
 // blocks first (for direct usage), first-fit, then in the FREE_PENDING_TOKEN
 // blocks, waiting for them. The current implementation isn't smart about
(...skipping 47 matching lines...)
 void FencedAllocator::FreePendingToken(
     FencedAllocator::Offset offset, int32 token) {
   BlockIndex index = GetBlockByOffset(offset);
   Block &block = blocks_[index];
   if (block.state == IN_USE)
     bytes_in_use_ -= block.size;
   block.state = FREE_PENDING_TOKEN;
   block.token = token;
 }

+void FencedAllocator::FreePendingSerial(
+    FencedAllocator::Offset offset, uint32 serial) {
+  BlockIndex index = GetBlockByOffset(offset);
+  Block &block = blocks_[index];
+  if (block.state == IN_USE)
+    bytes_in_use_ -= block.size;
+  block.state = FREE_PENDING_SERIAL;
+  block.serial = serial;
+}
+
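For context, the existing pattern at call sites pairs FreePendingToken() with CommandBufferHelper::InsertToken(); the serial passed to the new FreePendingSerial() would come from whatever asynchronous mechanism the rest of this patch introduces, so the second call below is purely illustrative:

  // Existing path: free once the GPU has consumed the data.
  allocator->FreePendingToken(offset, helper->InsertToken());
  // New path added by this patch; |upload_serial| is a hypothetical example.
  allocator->FreePendingSerial(offset, upload_serial);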
 // Gets the max of the size of the blocks marked as free.
 unsigned int FencedAllocator::GetLargestFreeSize() {
   FreeUnused();
   unsigned int max_size = 0;
   for (unsigned int i = 0; i < blocks_.size(); ++i) {
     Block &block = blocks_[i];
     if (block.state == FREE)
       max_size = std::max(max_size, block.size);
   }
   return max_size;
 }

 // Gets the size of the largest segment of blocks that are either FREE or
 // FREE_PENDING_TOKEN.
 unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
   unsigned int max_size = 0;
   unsigned int current_size = 0;
   for (unsigned int i = 0; i < blocks_.size(); ++i) {
     Block &block = blocks_[i];
-    if (block.state == IN_USE) {
-      max_size = std::max(max_size, current_size);
-      current_size = 0;
-    } else {
-      DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
-      current_size += block.size;
+    switch (block.state) {
+      case IN_USE:
+      case FREE_PENDING_SERIAL:
+        max_size = std::max(max_size, current_size);
+        current_size = 0;
+        break;
+      default:
+        DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
+        current_size += block.size;
+        break;
     }
   }
   return std::max(max_size, current_size);
 }
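Note the effect of the change above: a FREE_PENDING_SERIAL block now terminates a free-or-pending run the same way an IN_USE block does, since it cannot be reclaimed by waiting on a token. For example, with consecutive blocks FREE(16), FREE_PENDING_SERIAL(8), FREE(32), the function returns 32; treating the serial block like a pending-token block would have yielded 56.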

 // Makes sure that:
 // - there is at least one block.
 // - there are no contiguous FREE blocks (they should have been collapsed).
 // - the successive offsets match the block sizes, and they are in order.
 bool FencedAllocator::CheckConsistency() {
(...skipping 45 matching lines...)
     BlockIndex index) {
   Block &block = blocks_[index];
   DCHECK_EQ(block.state, FREE_PENDING_TOKEN);
   helper_->WaitForToken(block.token);
   block.state = FREE;
   return CollapseFreeBlock(index);
 }

 // Frees any blocks pending a token for which the token has been read.
 void FencedAllocator::FreeUnused() {
-  int32 last_token_read = helper_->last_token_read();
   for (unsigned int i = 0; i < blocks_.size();) {
     Block& block = blocks_[i];
-    if (block.state == FREE_PENDING_TOKEN && block.token <= last_token_read) {
-      block.state = FREE;
-      i = CollapseFreeBlock(i);
-    } else {
-      ++i;
+    switch (block.state) {
+      case FREE_PENDING_TOKEN:
+        if (helper_->HasTokenPassed(block.token)) {
+          block.state = FREE;
+          i = CollapseFreeBlock(i);
+        } else {
+          ++i;
+        }
+        break;
+      case FREE_PENDING_SERIAL:
+        if (helper_->HasSerialPassed(block.serial)) {
+          block.state = FREE;
+          i = CollapseFreeBlock(i);
+        } else {
+          ++i;
+        }
+        break;
+      default:
+        ++i;
+        break;
     }
   }
 }

 // If the block is exactly the requested size, simply mark it IN_USE, otherwise
 // split it and mark the first one (of the requested size) IN_USE.
 FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
                                                       unsigned int size) {
   Block &block = blocks_[index];
   DCHECK_GE(block.size, size);
   DCHECK_EQ(block.state, FREE);
   Offset offset = block.offset;
   bytes_in_use_ += size;
   if (block.size == size) {
     block.state = IN_USE;
     return offset;
   }
-  Block newblock = { FREE, offset + size, block.size - size, kUnusedToken};
+  Block newblock = {
+      FREE, offset + size,
+      block.size - size,
+      kUnusedToken,
+      kUnusedSerial
+  };
   block.state = IN_USE;
   block.size = size;
   // this is the last thing being done because it may invalidate block;
   blocks_.insert(blocks_.begin() + index + 1, newblock);
   return offset;
 }

 // The blocks are in offset order, so we can do a binary search.
 FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
-  Block templ = { IN_USE, offset, 0, kUnusedToken };
+  Block templ = {
+      IN_USE,
+      offset,
+      0,
+      kUnusedToken,
+      kUnusedSerial
+  };
   Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
                                             templ, OffsetCmp());
   DCHECK(it != blocks_.end() && it->offset == offset);
   return it-blocks_.begin();
 }
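The lower_bound call relies on blocks_ staying sorted by offset; OffsetCmp is declared in fenced_allocator.h and presumably amounts to something along these lines (shown here only for illustration):

  // Hypothetical shape of the comparator used above:
  class OffsetCmp {
   public:
    bool operator() (const Block &lhs, const Block &rhs) {
      return lhs.offset < rhs.offset;
    }
  };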

 } // namespace gpu
