Index: gpu/command_buffer/client/mapped_memory_unittest.cc
diff --git a/gpu/command_buffer/client/mapped_memory_unittest.cc b/gpu/command_buffer/client/mapped_memory_unittest.cc
index 3e174fad1c955c77b421d5ea404fec321aa1f795..d85311910ddadc9eadc0ab56de145393b7aae23a 100644
--- a/gpu/command_buffer/client/mapped_memory_unittest.cc
+++ b/gpu/command_buffer/client/mapped_memory_unittest.cc
@@ -4,6 +4,7 @@
 #include "gpu/command_buffer/client/mapped_memory.h"
+#include <list>
 #include "base/bind.h"
 #include "base/memory/scoped_ptr.h"
 #include "base/message_loop/message_loop.h"
@@ -85,6 +86,11 @@ class MappedMemoryTestBase : public testing::Test {
 const unsigned int MappedMemoryTestBase::kBufferSize;
 #endif
 
+namespace {
+void EmptyPoll() {
+}
+}
+
 // Test fixture for MemoryChunk test - Creates a MemoryChunk, using a
 // CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
 // it directly, not through the RPC mechanism), making sure Noops are ignored
@@ -97,7 +103,10 @@ class MemoryChunkTest : public MappedMemoryTestBase {
     scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
     shared_memory->CreateAndMapAnonymous(kBufferSize);
     buffer_ = new gpu::Buffer(shared_memory.Pass(), kBufferSize);
-    chunk_.reset(new MemoryChunk(kShmId, buffer_, helper_.get()));
+    chunk_.reset(new MemoryChunk(kShmId,
+                                 buffer_,
+                                 helper_.get(),
+                                 base::Bind(&EmptyPoll)));
   }
 
   virtual void TearDown() {
@@ -148,11 +157,16 @@ TEST_F(MemoryChunkTest, Basic) {
 }
 
 class MappedMemoryManagerTest : public MappedMemoryTestBase {
+ public:
+  MappedMemoryManager* manager() const {
+    return manager_.get();
+  }
+
  protected:
   virtual void SetUp() {
     MappedMemoryTestBase::SetUp();
     manager_.reset(new MappedMemoryManager(
-        helper_.get(), MappedMemoryManager::kNoLimit));
+        helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
   }
 
   virtual void TearDown() {
@@ -312,7 +326,8 @@ TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
 TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
   const unsigned int kChunkSize = 2048;
   // Reset the manager with a memory limit.
-  manager_.reset(new MappedMemoryManager(helper_.get(), kChunkSize));
+  manager_.reset(new MappedMemoryManager(
+      helper_.get(), base::Bind(&EmptyPoll), kChunkSize));
   manager_->set_chunk_size_multiple(kChunkSize);
 
   // Allocate one chunk worth of memory.
@@ -340,7 +355,8 @@ TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
 TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
   const unsigned int kSize = 1024;
   // Reset the manager with a memory limit.
-  manager_.reset(new MappedMemoryManager(helper_.get(), kSize));
+  manager_.reset(new MappedMemoryManager(
+      helper_.get(), base::Bind(&EmptyPoll), kSize));
   const unsigned int kChunkSize = 2 * 1024;
   manager_->set_chunk_size_multiple(kChunkSize);
 
@@ -386,4 +402,55 @@ TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
   EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());
 }
 
+namespace {
+void Poll(MappedMemoryManagerTest *test, std::list<void*>* list) {
+  std::list<void*>::iterator it = list->begin();
+  while (it != list->end()) {
+    void* address = *it;
+    test->manager()->Free(address);
+    it = list->erase(it);
+  }
+}
+}
+
+TEST_F(MappedMemoryManagerTest, Poll) {
+  std::list<void*> unmanaged_memory_list;
+
+  const unsigned int kSize = 1024;
+  // Reset the manager with a memory limit.
+  manager_.reset(new MappedMemoryManager(
+      helper_.get(),
+      base::Bind(&Poll, this, &unmanaged_memory_list),
+      kSize));
+
+  // Allocate kSize bytes. Don't add the address to
+  // the unmanaged memory list, so that it won't be freed just yet.
+  int32 id1;
+  unsigned int offset1;
+  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+  EXPECT_EQ(manager_->bytes_in_use(), kSize);
+
+  // Allocate kSize more bytes, and make sure we grew.
+  int32 id2;
+  unsigned int offset2;
+  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+  EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
+
+  // Make the unmanaged buffer be released next time FreeUnused() is called
+  // in MappedMemoryManager/FencedAllocator. This happens for example when
+  // allocating new memory.
+  unmanaged_memory_list.push_back(mem1);
+
+  // Allocate kSize more bytes. This should poll unmanaged memory, which now
+  // should free the previously allocated unmanaged memory.
+  int32 id3;
+  unsigned int offset3;
+  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+  EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
+
+  manager_->Free(mem2);
+  manager_->Free(mem3);
+  EXPECT_EQ(manager_->bytes_in_use(), static_cast<size_t>(0));
+}
+
 }  // namespace gpu
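
For readers skimming the new Poll test, the standalone sketch below illustrates the callback pattern it exercises, outside of the gpu code: the manager is handed a poll callback at construction and invokes it before an allocation would push it past a limit, giving the caller a chance to release buffers it is still holding. ToyMappedMemoryManager, its 2048-byte soft limit, and the use of std::function in place of base::Bind/base::Closure are assumptions made purely so the example compiles on its own; the real MappedMemoryManager polls through FreeUnused() as the test comments describe.

// Minimal, self-contained sketch of the poll-callback pattern (not the real
// gpu::MappedMemoryManager API). std::function stands in for base::Closure.
#include <cstddef>
#include <functional>
#include <iostream>
#include <list>
#include <utility>

class ToyMappedMemoryManager {
 public:
  explicit ToyMappedMemoryManager(std::function<void()> poll_callback)
      : poll_callback_(std::move(poll_callback)), bytes_in_use_(0) {}

  // Allocates |size| bytes. If that would exceed the soft limit, run the poll
  // callback first so the client gets a chance to free memory it no longer
  // needs before the manager grows.
  void* Alloc(std::size_t size) {
    if (bytes_in_use_ + size > kSoftLimit && poll_callback_)
      poll_callback_();
    bytes_in_use_ += size;
    return ::operator new(size);
  }

  void Free(void* mem, std::size_t size) {
    bytes_in_use_ -= size;
    ::operator delete(mem);
  }

  std::size_t bytes_in_use() const { return bytes_in_use_; }

 private:
  static constexpr std::size_t kSoftLimit = 2048;  // Hypothetical limit.
  std::function<void()> poll_callback_;
  std::size_t bytes_in_use_;
};

int main() {
  // Buffers the client has handed off and will release the next time the
  // manager polls; this mirrors |unmanaged_memory_list| in the test above.
  std::list<std::pair<void*, std::size_t>> pending;
  ToyMappedMemoryManager* manager_ptr = nullptr;

  ToyMappedMemoryManager manager([&]() {
    // Poll callback: free every pending buffer, like Poll() in the test.
    while (!pending.empty()) {
      manager_ptr->Free(pending.front().first, pending.front().second);
      pending.pop_front();
    }
  });
  manager_ptr = &manager;  // Safe: the callback only runs from Alloc().

  void* a = manager.Alloc(1024);                   // bytes_in_use == 1024.
  pending.push_back(std::make_pair(a, std::size_t(1024)));
  void* b = manager.Alloc(1024);                   // Exactly at the limit; no poll.
  void* c = manager.Alloc(1024);                   // Over the limit; poll frees |a|.
  std::cout << manager.bytes_in_use() << "\n";     // Prints 2048, not 3072.
  manager.Free(b, 1024);
  manager.Free(c, 1024);
  return 0;
}

Keeping the callback as a plain no-argument closure, as the patch does with base::Bind(&EmptyPoll) and base::Bind(&Poll, this, &unmanaged_memory_list), lets each caller bind whatever bookkeeping state it needs without widening the manager's interface.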