| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_ | 5 #ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_ |
| 6 #define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_ | 6 #define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_ |
| 7 | 7 |
| 8 #include "base/bind.h" |
| 8 #include "base/memory/scoped_vector.h" | 9 #include "base/memory/scoped_vector.h" |
| 9 #include "gpu/command_buffer/client/fenced_allocator.h" | 10 #include "gpu/command_buffer/client/fenced_allocator.h" |
| 10 #include "gpu/command_buffer/common/buffer.h" | 11 #include "gpu/command_buffer/common/buffer.h" |
| 11 #include "gpu/command_buffer/common/types.h" | 12 #include "gpu/command_buffer/common/types.h" |
| 12 #include "gpu/gpu_export.h" | 13 #include "gpu/gpu_export.h" |
| 13 | 14 |
| 14 namespace gpu { | 15 namespace gpu { |
| 15 | 16 |
| 16 class CommandBufferHelper; | 17 class CommandBufferHelper; |
| 17 | 18 |
| 18 // Manages a shared memory segment. | 19 // Manages a shared memory segment. |
| 19 class GPU_EXPORT MemoryChunk { | 20 class GPU_EXPORT MemoryChunk { |
| 20 public: | 21 public: |
| 21 MemoryChunk(int32 shm_id, | 22 MemoryChunk(int32 shm_id, |
| 22 scoped_refptr<gpu::Buffer> shm, | 23 scoped_refptr<gpu::Buffer> shm, |
| 23 CommandBufferHelper* helper); | 24 CommandBufferHelper* helper, |
| 25 const base::Closure& poll_callback); |
| 24 ~MemoryChunk(); | 26 ~MemoryChunk(); |
| 25 | 27 |
| 26 // Gets the size of the largest free block that is available without waiting. | 28 // Gets the size of the largest free block that is available without waiting. |
| 27 unsigned int GetLargestFreeSizeWithoutWaiting() { | 29 unsigned int GetLargestFreeSizeWithoutWaiting() { |
| 28 return allocator_.GetLargestFreeSize(); | 30 return allocator_.GetLargestFreeSize(); |
| 29 } | 31 } |
| 30 | 32 |
| 31 // Gets the size of the largest free block that can be allocated if the | 33 // Gets the size of the largest free block that can be allocated if the |
| 32 // caller can wait. | 34 // caller can wait. |
| 33 unsigned int GetLargestFreeSizeWithWaiting() { | 35 unsigned int GetLargestFreeSizeWithWaiting() { |
| (...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 114 // Manages MemoryChunks. | 116 // Manages MemoryChunks. |
| 115 class GPU_EXPORT MappedMemoryManager { | 117 class GPU_EXPORT MappedMemoryManager { |
| 116 public: | 118 public: |
| 117 enum MemoryLimit { | 119 enum MemoryLimit { |
| 118 kNoLimit = 0, | 120 kNoLimit = 0, |
| 119 }; | 121 }; |
| 120 | 122 |
| 121 // |unused_memory_reclaim_limit|: When exceeded, this causes pending memory | 123 // |unused_memory_reclaim_limit|: When exceeded, this causes pending memory |
| 122 // to be reclaimed before allocating more memory. | 124 // to be reclaimed before allocating more memory. |
| 123 MappedMemoryManager(CommandBufferHelper* helper, | 125 MappedMemoryManager(CommandBufferHelper* helper, |
| 126 const base::Closure& poll_callback, |
| 124 size_t unused_memory_reclaim_limit); | 127 size_t unused_memory_reclaim_limit); |
| 125 | 128 |
| 126 ~MappedMemoryManager(); | 129 ~MappedMemoryManager(); |
| 127 | 130 |
| 128 unsigned int chunk_size_multiple() const { | 131 unsigned int chunk_size_multiple() const { |
| 129 return chunk_size_multiple_; | 132 return chunk_size_multiple_; |
| 130 } | 133 } |
| 131 | 134 |
| 132 void set_chunk_size_multiple(unsigned int multiple) { | 135 void set_chunk_size_multiple(unsigned int multiple) { |
| 133 chunk_size_multiple_ = multiple; | 136 chunk_size_multiple_ = multiple; |
| (...skipping 24 matching lines...) Expand all Loading... |
| 158 void FreePendingToken(void* pointer, int32 token); | 161 void FreePendingToken(void* pointer, int32 token); |
| 159 | 162 |
| 160 // Free any shared memory that is not in use. | 163 // Free any shared memory that is not in use. |
| 161 void FreeUnused(); | 164 void FreeUnused(); |
| 162 | 165 |
| 163 // Used for testing | 166 // Used for testing |
| 164 size_t num_chunks() const { | 167 size_t num_chunks() const { |
| 165 return chunks_.size(); | 168 return chunks_.size(); |
| 166 } | 169 } |
| 167 | 170 |
| 171 size_t bytes_in_use() const { |
| 172 size_t bytes_in_use = 0; |
| 173 for (size_t ii = 0; ii < chunks_.size(); ++ii) { |
| 174 MemoryChunk* chunk = chunks_[ii]; |
| 175 bytes_in_use += chunk->bytes_in_use(); |
| 176 } |
| 177 return bytes_in_use; |
| 178 } |
| 179 |
| 168 // Used for testing | 180 // Used for testing |
| 169 size_t allocated_memory() const { | 181 size_t allocated_memory() const { |
| 170 return allocated_memory_; | 182 return allocated_memory_; |
| 171 } | 183 } |
| 172 | 184 |
| 173 private: | 185 private: |
| 174 typedef ScopedVector<MemoryChunk> MemoryChunkVector; | 186 typedef ScopedVector<MemoryChunk> MemoryChunkVector; |
| 175 | 187 |
| 176 // Size a chunk is rounded up to. | 188 // Size a chunk is rounded up to. |
| 177 unsigned int chunk_size_multiple_; | 189 unsigned int chunk_size_multiple_; |
| 178 CommandBufferHelper* helper_; | 190 CommandBufferHelper* helper_; |
| 191 base::Closure poll_callback_; |
| 179 MemoryChunkVector chunks_; | 192 MemoryChunkVector chunks_; |
| 180 size_t allocated_memory_; | 193 size_t allocated_memory_; |
| 181 size_t max_free_bytes_; | 194 size_t max_free_bytes_; |
| 182 | 195 |
| 183 DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager); | 196 DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager); |
| 184 }; | 197 }; |
| 185 | 198 |
| 186 } // namespace gpu | 199 } // namespace gpu |
| 187 | 200 |
| 188 #endif // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_ | 201 #endif // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_ |
| 189 | 202 |
| OLD | NEW |