Index: media/audio/shared_mem_synchronizer_unittest.cc |
diff --git a/media/audio/shared_mem_synchronizer_unittest.cc b/media/audio/shared_mem_synchronizer_unittest.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..ac9042a8a84b8a4d3f226b1ec5b84c1cea9be79f |
--- /dev/null |
+++ b/media/audio/shared_mem_synchronizer_unittest.cc |
@@ -0,0 +1,429 @@ |
+// Copyright (c) 2012 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "base/compiler_specific.h" |
+#include "base/logging.h" |
+#include "base/shared_memory.h" |
+#include "base/test/multiprocess_test.h" |
+#include "base/threading/platform_thread.h" |
+#include "media/audio/shared_mem_synchronizer.h" |
+#include "testing/gtest/include/gtest/gtest.h" |
+#include "testing/multiprocess_func_list.h" |
+ |
+namespace { |
+// A simple worker that we'll run two instances of. Both threads get a |
+// pointer to the same |shared_data| and use a SharedMemSynchronizer to |
+// control when each thread may read/write it. |
+class SingleSynchronizerWorker : public base::PlatformThread::Delegate { |
+ public: |
+ SingleSynchronizerWorker(size_t* shared_data, size_t repeats, |
+ SharedMemSynchronizer* synchronizer) |
+ : shared_data_(shared_data), repeats_(repeats), |
+ synchronizer_(synchronizer) { |
+ } |
+ virtual ~SingleSynchronizerWorker() {} |
+ |
+ virtual void ThreadMain() OVERRIDE { |
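+    // Take turns with the peer thread: wait until it's our turn, bump the |
+    // shared counter, then signal so the peer can run. |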
+ for (size_t i = 0; i < repeats_; ++i) { |
+ synchronizer_->Wait(); |
+ ++(*shared_data_); |
+ synchronizer_->Signal(); |
+ } |
+ } |
+ |
+ private: |
+ size_t* shared_data_; |
+ size_t repeats_; |
+ SharedMemSynchronizer* synchronizer_; |
+ DISALLOW_COPY_AND_ASSIGN(SingleSynchronizerWorker); |
+}; |
+ |
+// Similar to SingleSynchronizerWorker, except that each instance of this |
+// class has more than one SharedMemSynchronizer to Wait on and Signal, and |
+// an equal number of |shared_data| slots that the synchronizers control |
+// access to. |
+class MultiSynchronizerWorker : public base::PlatformThread::Delegate { |
+ public: |
+ MultiSynchronizerWorker(size_t* shared_data, size_t repeats, |
+ SharedMemSynchronizer* synchronizers, |
+ size_t count) |
+ : shared_data_(shared_data), repeats_(repeats), |
+ synchronizers_(synchronizers), count_(count) { |
+ } |
+ virtual ~MultiSynchronizerWorker() {} |
+ |
+ virtual void ThreadMain() OVERRIDE { |
+ SharedMemSynchronizer::WaitForMultiple waiter(synchronizers_, count_); |
+ for (size_t i = 0; i < repeats_; ++i) { |
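+      // Wait() returns the index of the synchronizer that was signaled; |
+      // update the corresponding |shared_data_| slot before signaling back. |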
+ int signaled = waiter.Wait(); |
+ ++shared_data_[signaled]; |
+ synchronizers_[signaled].Signal(); |
+ } |
+ } |
+ |
+ private: |
+ size_t* shared_data_; |
+ size_t repeats_; |
+ SharedMemSynchronizer* synchronizers_; |
+ size_t count_; |
+ DISALLOW_COPY_AND_ASSIGN(MultiSynchronizerWorker); |
+}; |
+ |
+// A fixed array of bool flags. Each flag uses 1 bit. Use sizeof(FlagArray) |
+// to determine how much memory you need. The number of flags will therefore |
+// be sizeof(FlagArray) * 8. |
+// We use 'struct' to signify that this structure represents |
+// compiler-independent structured data. I.e. you must be able to map this |
+// class onto a piece of shared memory of size sizeof(FlagArray) and use it |
+// directly. No vtables etc. |
+// TODO(tommi): Move this to its own header when we start using it for signaling |
+// audio devices. As is, it's just here for perf comparison against the |
+// "multiple synchronizers" approach. |
+struct FlagArray { |
+ public: |
+ FlagArray() : flags_() {} |
+ |
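+  // Each uint32 in |flags_| holds 32 flags: |index| >> 5 selects the word |
+  // and |index| & 31 selects the bit within that word. |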
+ bool is_set(size_t index) const { |
+ return (flags_[index >> 5] & (1 << (index & 31))) ? true : false; |
+ } |
+ |
+ void set(size_t index) { |
+ flags_[index >> 5] |= (1U << (static_cast<uint32>(index) & 31)); |
+ } |
+ |
+ void clear(size_t index) { |
+ flags_[index >> 5] &= ~(1U << (static_cast<uint32>(index) & 31)); |
+ } |
+ |
+ // Returns the number of flags that can be set/checked. |
+ size_t size() const { return sizeof(flags_) * 8; } |
+ |
+ private: |
+ // 256 * 32 = 8192 flags in 1KB. |
+ uint32 flags_[256]; |
+ DISALLOW_COPY_AND_ASSIGN(FlagArray); |
+}; |
+ |
+class MultiSynchronizerWorkerFlagArray : public base::PlatformThread::Delegate { |
+ public: |
+ MultiSynchronizerWorkerFlagArray(size_t count, FlagArray* signals, |
+ size_t* shared_data, size_t repeats, |
+ SharedMemSynchronizer* synchronizer) |
+ : count_(count), signals_(signals), shared_data_(shared_data), |
+ repeats_(repeats), synchronizer_(synchronizer) { |
+ } |
+ virtual ~MultiSynchronizerWorkerFlagArray() {} |
+ |
+ virtual void ThreadMain() OVERRIDE { |
+ for (size_t i = 0; i < repeats_; ++i) { |
+ synchronizer_->Wait(); |
+ for (size_t s = 0; s < count_; ++s) { |
+ if (signals_->is_set(s)) { |
+ ++shared_data_[s]; |
+ // We don't clear the flag here but simply leave it signaled because |
+ // we want the other thread to also increment this variable. |
+ } |
+ } |
+ synchronizer_->Signal(); |
+ } |
+ } |
+ |
+ private: |
+ size_t count_; |
+ FlagArray* signals_; |
+ size_t* shared_data_; |
+ size_t repeats_; |
+ SharedMemSynchronizer* synchronizer_; |
+ DISALLOW_COPY_AND_ASSIGN(MultiSynchronizerWorkerFlagArray); |
+}; |
+ |
+} // end namespace |
+ |
+TEST(SharedMemSynchronizer, FlagArray) { |
+ FlagArray flags; |
+ EXPECT_GT(flags.size(), 1000U); |
+ for (size_t i = 0; i < flags.size(); ++i) { |
+ EXPECT_FALSE(flags.is_set(i)); |
+ flags.set(i); |
+ EXPECT_TRUE(flags.is_set(i)); |
+ flags.clear(i); |
+ EXPECT_FALSE(flags.is_set(i)); |
+ } |
+} |
+ |
+// Initializes a pair of synchronizers, signals each one, and makes sure the |
+// other one's wait is satisfied. |
+TEST(SharedMemSynchronizer, Basic) { |
+ SharedMemSynchronizer a, b; |
+ ASSERT_TRUE(SharedMemSynchronizer::InitializePair(&a, &b)); |
+ EXPECT_TRUE(a.IsValid()); |
+ EXPECT_TRUE(b.IsValid()); |
+ |
+ a.Signal(); |
+ b.Wait(); |
+ |
+ b.Signal(); |
+ a.Wait(); |
+} |
+ |
+// Spins up two worker threads, each with its own SharedMemSynchronizer, |
+// which they use to coordinate reads and writes of a shared variable. |
+TEST(SharedMemSynchronizer, TwoThreads) { |
+ SharedMemSynchronizer a, b; |
+ ASSERT_TRUE(SharedMemSynchronizer::InitializePair(&a, &b)); |
+ |
+ size_t data = 0; |
+ const size_t kRepeats = 10000; |
+ SingleSynchronizerWorker worker_1(&data, kRepeats, &a); |
+ SingleSynchronizerWorker worker_2(&data, kRepeats, &b); |
+ base::PlatformThreadHandle thread_1, thread_2; |
+ base::PlatformThread::Create(0, &worker_1, &thread_1); |
+ base::PlatformThread::Create(0, &worker_2, &thread_2); |
+ |
+  // Kick off the ping-pong. The two threads should take turns incrementing |
+  // the shared variable and never step on each other's toes. |
+ a.Signal(); |
+ |
+ base::PlatformThread::Join(thread_1); |
+ base::PlatformThread::Join(thread_2); |
+ |
+ EXPECT_EQ(kRepeats * 2, data); |
+} |
+ |
+// Uses a pair of threads to access up to 1000 pieces of synchronized shared |
+// data. On regular dev machines the number of synchronizers should reach |
+// 1000, but on Mac and Linux bots the number will be smaller due to the |
+// RLIMIT_NOFILE limit. Specifically, the Linux bots have this limit set to |
+// 1024, which for this test means that the maximum number of synchronizers |
+// will be in the range 500-512. On Mac the limit is 256, so |count| will be |
+// ~120. Raising the limit via setrlimit() does not work either. |
+TEST(SharedMemSynchronizer, ThousandSynchronizersTwoThreads) { |
+ const size_t kCount = 1000; |
+ SharedMemSynchronizer a[kCount], b[kCount]; |
+ size_t count = 0; |
+ for (size_t i = 0; i < kCount; ++i) { |
+ if (!SharedMemSynchronizer::InitializePair(&a[i], &b[i])) { |
+ LOG(WARNING) << "SharedMemSynchronizer::InitializePair failed at " << i; |
+ break; |
+ } |
+ ++count; |
+ } |
+ |
+ size_t data[kCount] = {0}; |
+ // We use a multiple of the count so that the division in the check below |
+ // will be nice and round. |
+ size_t repeats = count * 1; |
+ MultiSynchronizerWorker worker_1(&data[0], repeats, &a[0], count); |
+ MultiSynchronizerWorker worker_2(&data[0], repeats, &b[0], count); |
+ base::PlatformThreadHandle thread_1, thread_2; |
+ base::PlatformThread::Create(0, &worker_1, &thread_1); |
+ base::PlatformThread::Create(0, &worker_2, &thread_2); |
+ |
+ for (size_t i = 0; i < count; ++i) |
+ a[i].Signal(); |
+ |
+ base::PlatformThread::Join(thread_1); |
+ base::PlatformThread::Join(thread_2); |
+ |
+ size_t expected_total = count * 2; |
+ size_t total = 0; |
+ for (size_t i = 0; i < count; ++i) { |
+ // The SharedMemSynchronizer::WaitForMultiple class should have ensured that |
+ // all synchronizers had the same quality of service. |
+ EXPECT_EQ(expected_total / count, data[i]); |
+ total += data[i]; |
+ } |
+ EXPECT_EQ(expected_total, total); |
+} |
+ |
+// Functionally equivalent (as far as the shared data goes) to the |
+// ThousandSynchronizersTwoThreads test but uses a single pair of |
+// synchronizers + FlagArray for the 1000 signals. |
+// This approach is significantly faster. |
+TEST(SharedMemSynchronizer, TwoSynchronizersTwoThreads1000Signals) { |
+ SharedMemSynchronizer a, b; |
+ ASSERT_TRUE(SharedMemSynchronizer::InitializePair(&a, &b)); |
+ |
+ const size_t kCount = 1000; |
+ FlagArray signals; |
+ ASSERT_GE(signals.size(), kCount); |
+ size_t data[kCount] = {0}; |
+ |
+  // Since this algorithm checks all flags each time the synchronizer is |
+  // signaled, |repeats| doesn't mean the same thing here as it does in |
+  // ThousandSynchronizersTwoThreads: one repeat here is equivalent to kCount |
+  // repeats there. |
+ size_t repeats = 1; |
+ MultiSynchronizerWorkerFlagArray worker_1( |
+ kCount, &signals, &data[0], repeats, &a); |
+ MultiSynchronizerWorkerFlagArray worker_2( |
+ kCount, &signals, &data[0], repeats, &b); |
+ base::PlatformThreadHandle thread_1, thread_2; |
+ base::PlatformThread::Create(0, &worker_1, &thread_1); |
+ base::PlatformThread::Create(0, &worker_2, &thread_2); |
+ |
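+  // Set every flag up front and then wake one worker; each wakeup processes |
+  // all of the currently set flags. |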
+ for (size_t i = 0; i < kCount; ++i) |
+ signals.set(i); |
+ a.Signal(); |
+ |
+ base::PlatformThread::Join(thread_1); |
+ base::PlatformThread::Join(thread_2); |
+ |
+ size_t expected_total = kCount * 2; |
+ size_t total = 0; |
+ for (size_t i = 0; i < kCount; ++i) { |
+ // Since for each signal, we process all signaled events, the shared data |
+ // variables should all be equal. |
+ EXPECT_EQ(expected_total / kCount, data[i]); |
+ total += data[i]; |
+ } |
+ EXPECT_EQ(expected_total, total); |
+} |
+ |
+// Tests the maximum number of synchronizers we can wait on without spinning |
+// up further wait threads on Windows (WaitForMultipleObjects is limited to |
+// MAXIMUM_WAIT_OBJECTS, i.e. 64, handles per call). This test assumes we can |
+// always create 64 pairs and bails if we can't. |
+TEST(SharedMemSynchronizer, MultipleWaits64) { |
+ const size_t kCount = 64; |
+ SharedMemSynchronizer a[kCount], b[kCount]; |
+ for (size_t i = 0; i < kCount; ++i) { |
+ ASSERT_TRUE(SharedMemSynchronizer::InitializePair(&a[i], &b[i])); |
+ } |
+ |
+ SharedMemSynchronizer::WaitForMultiple waiter(&b[0], kCount); |
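+  // Signal one synchronizer at a time and check that Wait() reports the |
+  // matching index. |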
+ for (size_t i = 0; i < kCount; ++i) { |
+ a[i].Signal(); |
+ int index = waiter.Wait(); |
+ EXPECT_EQ(i, static_cast<size_t>(index)); |
+ } |
+} |
+ |
+// Tests waiting for more synchronizers than the OS supports waiting on from |
+// a single thread. The test will create at most 1000 pairs, but on Mac/Linux |
+// bots the actual number will be lower. See the comment about the |
+// RLIMIT_NOFILE limit above for more details. |
+TEST(SharedMemSynchronizer, MultipleWaits1000) { |
+  // 1000 synchronizers require 16 threads on Windows, including the current |
+  // one, to perform the wait operation. |
+ const size_t kCount = 1000; |
+ SharedMemSynchronizer a[kCount], b[kCount]; |
+ size_t count = 0; |
+ for (size_t i = 0; i < kCount; ++i) { |
+ if (!SharedMemSynchronizer::InitializePair(&a[i], &b[i])) { |
+ LOG(WARNING) << "SharedMemSynchronizer::InitializePair failed at " << i; |
+ break; |
+ } |
+ ++count; |
+ } |
+ |
+ for (size_t i = 0; i < count; ++i) { |
+ a[i].Signal(); |
+ // To disable the load distribution algorithm and force the extra worker |
+ // thread(s) to catch the signaled event, we define the |waiter| inside |
+ // the loop. |
+ SharedMemSynchronizer::WaitForMultiple waiter(&b[0], count); |
+ int index = waiter.Wait(); |
+ EXPECT_EQ(i, static_cast<size_t>(index)); |
+ } |
+} |
+ |
+class SharedMemSynchronizerMultiProcessTest : public base::MultiProcessTest { |
+ public: |
+ static const char kSharedMemName[]; |
+ static const size_t kSharedMemSize = 1024; |
+ |
+ protected: |
+ virtual void SetUp() OVERRIDE { |
+ base::MultiProcessTest::SetUp(); |
+ } |
+ |
+ virtual void TearDown() OVERRIDE { |
+ base::MultiProcessTest::TearDown(); |
+ } |
+}; |
+ |
+// static |
+const char SharedMemSynchronizerMultiProcessTest::kSharedMemName[] = |
+ "SharedMemSynchronizerMultiProcessTest"; |
+ |
+namespace { |
+// A very crude IPC mechanism that we use to pass the synchronizer handles |
+// from the parent process to the spawned child process. |
+struct CrudeIpc { |
+ uint8 ready; |
+ SharedMemSynchronizer::IPCHandle handle_1; |
+ SharedMemSynchronizer::IPCHandle handle_2; |
+}; |
+} // end namespace |
+ |
+// The main routine of the child process. Waits for the parent process to |
+// copy the handles over to the child and then uses a SharedMemSynchronizer |
+// to wait on and signal the parent process. |
+MULTIPROCESS_TEST_MAIN(SharedMemSynchronizerChildMain) { |
+ base::SharedMemory mem; |
+ bool ok = mem.CreateNamed( |
+ SharedMemSynchronizerMultiProcessTest::kSharedMemName, |
+ true, |
+ SharedMemSynchronizerMultiProcessTest::kSharedMemSize); |
+ DCHECK(ok); |
+ if (!ok) { |
+ LOG(ERROR) << "Failed to open shared memory segment."; |
+ return -1; |
+ } |
+ |
+ mem.Map(SharedMemSynchronizerMultiProcessTest::kSharedMemSize); |
+ CrudeIpc* ipc = reinterpret_cast<CrudeIpc*>(mem.memory()); |
+ |
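+  // Poll until the parent process has written the synchronizer handles. |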
+ while (!ipc->ready) |
+ base::PlatformThread::Sleep(10); |
+ |
+ SharedMemSynchronizer synchronizer(ipc->handle_1, ipc->handle_2); |
+ synchronizer.Wait(); |
+ synchronizer.Signal(); |
+ |
+ return 0; |
+} |
+ |
+// Spawns a new process and hands a SharedMemSynchronizer instance to the |
+// new process. Once that's done, it waits for the child process to signal |
+// its completion and then quits. |
+TEST_F(SharedMemSynchronizerMultiProcessTest, Basic) { |
+ base::SharedMemory mem; |
+ mem.Delete(kSharedMemName); // In case a previous run was unsuccessful. |
+ bool ok = mem.CreateNamed(kSharedMemName, false, kSharedMemSize); |
+ ASSERT_TRUE(ok); |
+ |
+ ASSERT_TRUE(mem.Map(kSharedMemSize)); |
+ |
+ SharedMemSynchronizer a, b; |
+ ASSERT_TRUE(SharedMemSynchronizer::InitializePair(&a, &b)); |
+ EXPECT_TRUE(a.IsValid()); |
+ EXPECT_TRUE(b.IsValid()); |
+ |
+ CrudeIpc* ipc = reinterpret_cast<CrudeIpc*>(mem.memory()); |
+ ipc->ready = false; |
+ |
+#if defined(OS_POSIX) |
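+  // On POSIX the shared handle is a file descriptor, so map it to a fixed |
+  // fd number in the child and update |handle_1| so that the child looks |
+  // for the remapped descriptor. |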
+ const int kPosixChildSocket = 20; |
+ EXPECT_TRUE(b.ShareToProcess(NULL, &ipc->handle_1, &ipc->handle_2)); |
+ base::FileHandleMappingVector fd_mapping_vec; |
+ fd_mapping_vec.push_back(std::pair<int, int>(ipc->handle_1.fd, |
+ kPosixChildSocket)); |
+ ipc->handle_1.fd = kPosixChildSocket; |
+ base::ProcessHandle process = SpawnChild("SharedMemSynchronizerChildMain", |
+ fd_mapping_vec, false); |
+#else |
+ base::ProcessHandle process = SpawnChild("SharedMemSynchronizerChildMain", |
+ false); |
+ EXPECT_TRUE(b.ShareToProcess(process, &ipc->handle_1, &ipc->handle_2)); |
+#endif |
+ |
+ ipc->ready = true; |
+ |
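+  // Wake the child up, then wait for it to signal us back before checking |
+  // its exit code. |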
+ a.Signal(); |
+ a.Wait(); |
+ |
+ int exit_code = -1; |
+ base::WaitForExitCode(process, &exit_code); |
+ EXPECT_EQ(0, exit_code); |
+} |