OLD | NEW |
---|---|
(Empty) | |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/compiler_specific.h" | |
6 #include "base/logging.h" | |
7 #include "base/shared_memory.h" | |
8 #include "base/test/multiprocess_test.h" | |
9 #include "base/threading/platform_thread.h" | |
10 #include "media/audio/shared_mem_synchronizer.h" | |
11 #include "testing/gtest/include/gtest/gtest.h" | |
12 #include "testing/multiprocess_func_list.h" | |
13 | |
14 namespace { | |
15 | |
Ami GONE FROM CHROMIUM, 2012/03/13 20:08:02:
Any reason to use a stack array instead of just us
tommi (sloooow) - chröme, 2012/03/14 13:32:43:
Done. The alternative is to have two loops, one f
| |
16 // A convenience method to populate a vector of SharedMemSynchronizer pointers | |
17 // from an on-stack array. | |
18 void PopulateVectorFromStackArray( | |
19 SharedMemSynchronizer::SynchronizerVector* synchronizers, | |
20 SharedMemSynchronizer* array, | |
21 size_t count) { | |
22 synchronizers->resize(count); | |
23 for (size_t i = 0; i < count; ++i) | |
24 (*synchronizers)[i] = &array[i]; | |
25 } | |
26 | |
27 // A simple thread that we'll run two instances of. Both threads get | |
28 // a pointer to the same |shared_data| and use a SharedMemSynchronizer to | |
29 // control when each thread can read/write. | |
30 class SingleSynchronizerWorker : public base::PlatformThread::Delegate { | |
31 public: | |
32 SingleSynchronizerWorker(size_t* shared_data, size_t repeats, | |
33 SharedMemSynchronizer* synchronizer) | |
34 : shared_data_(shared_data), repeats_(repeats), | |
35 synchronizer_(synchronizer) { | |
36 } | |
37 virtual ~SingleSynchronizerWorker() {} | |
38 | |
39 virtual void ThreadMain() OVERRIDE { | |
40 for (size_t i = 0; i < repeats_; ++i) { | |
41 synchronizer_->Wait(); | |
42 ++(*shared_data_); | |
43 synchronizer_->Signal(); | |
44 } | |
45 } | |
46 | |
47 private: | |
48 size_t* shared_data_; | |
49 size_t repeats_; | |
50 SharedMemSynchronizer* synchronizer_; | |
51 DISALLOW_COPY_AND_ASSIGN(SingleSynchronizerWorker); | |
52 }; | |
53 | |
54 // Similar to SingleSynchronizerWorker, except that each instance of this | |
55 // class has more than one SharedMemSynchronizer to Wait/Signal on, and an | |
56 // equal number of |shared_data| slots that the synchronizers control access to. | |
57 class MultiSynchronizerWorker : public base::PlatformThread::Delegate { | |
58 public: | |
59 MultiSynchronizerWorker(size_t* shared_data, size_t repeats, | |
60 const SharedMemSynchronizer::SynchronizerVector* synchronizers) | |
61 : shared_data_(shared_data), repeats_(repeats), | |
62 synchronizers_(synchronizers) { | |
63 } | |
64 virtual ~MultiSynchronizerWorker() {} | |
65 | |
66 virtual void ThreadMain() OVERRIDE { | |
67 SharedMemSynchronizer::WaitForMultiple waiter(synchronizers_); | |
68 for (size_t i = 0; i < repeats_; ++i) { | |
69 int signaled = waiter.Wait(); | |
70 ++shared_data_[signaled]; | |
71 (*synchronizers_)[signaled]->Signal(); | |
72 } | |
73 } | |
74 | |
75 private: | |
76 size_t* shared_data_; | |
77 size_t repeats_; | |
78 const SharedMemSynchronizer::SynchronizerVector* synchronizers_; | |
80 DISALLOW_COPY_AND_ASSIGN(MultiSynchronizerWorker); | |
81 }; | |
82 | |
83 // A fixed array of bool flags. Each flag uses 1 bit. Use sizeof(FlagArray) | |
84 // to determine how much memory you need. The number of flags will therefore | |
85 // be sizeof(FlagArray) * 8. | |
86 // We use 'struct' to signify that this structure represents | |
87 // compiler-independent structured data. I.e. you must be able to map this | |
88 // class onto a piece of shared memory of size sizeof(FlagArray) and be able | |
89 // to use the class. No vtables etc. | |
90 // TODO(tommi): Move this to its own header when we start using it for signaling | |
91 // audio devices. As is, it's just here for perf comparison against the | |
92 // "multiple synchronizers" approach. | |
93 struct FlagArray { | |
94 public: | |
95 FlagArray() : flags_() {} | |
96 | |
97 bool is_set(size_t index) const { | |
98 return (flags_[index >> 5] & (1U << (index & 31))) ? true : false; | |
99 } | |
100 | |
101 void set(size_t index) { | |
102 flags_[index >> 5] |= (1U << (static_cast<uint32>(index) & 31)); | |
103 } | |
104 | |
105 void clear(size_t index) { | |
106 flags_[index >> 5] &= ~(1U << (static_cast<uint32>(index) & 31)); | |
107 } | |
108 | |
109 // Returns the number of flags that can be set/checked. | |
110 size_t size() const { return sizeof(flags_) * 8; } | |
111 | |
112 private: | |
113 // 256 * 32 = 8192 flags in 1KB. | |
114 uint32 flags_[256]; | |
115 DISALLOW_COPY_AND_ASSIGN(FlagArray); | |
116 }; | |
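A minimal, self-contained sketch (illustrative only, not part of this CL) of the
index math FlagArray uses above: the word holding a flag is index >> 5 and the
bit within that word is index & 31.

  #include <cassert>

  int main() {
    const unsigned index = 100;
    const unsigned word = index >> 5;          // 100 / 32 == 3
    const unsigned mask = 1U << (index & 31);  // 100 % 32 == 4, so bit 4
    assert(word == 3);
    assert(mask == (1U << 4));
    return 0;
  }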
117 | |
118 class MultiSynchronizerWorkerFlagArray : public base::PlatformThread::Delegate { | |
119 public: | |
120 MultiSynchronizerWorkerFlagArray(size_t count, FlagArray* signals, | |
121 size_t* shared_data, size_t repeats, | |
122 SharedMemSynchronizer* synchronizer) | |
123 : count_(count), signals_(signals), shared_data_(shared_data), | |
124 repeats_(repeats), synchronizer_(synchronizer) { | |
125 } | |
126 virtual ~MultiSynchronizerWorkerFlagArray() {} | |
127 | |
128 virtual void ThreadMain() OVERRIDE { | |
129 for (size_t i = 0; i < repeats_; ++i) { | |
130 synchronizer_->Wait(); | |
131 for (size_t s = 0; s < count_; ++s) { | |
132 if (signals_->is_set(s)) { | |
133 ++shared_data_[s]; | |
134 // We don't clear the flag here but simply leave it signaled because | |
135 // we want the other thread to also increment this variable. | |
136 } | |
137 } | |
138 synchronizer_->Signal(); | |
139 } | |
140 } | |
141 | |
142 private: | |
143 size_t count_; | |
144 FlagArray* signals_; | |
145 size_t* shared_data_; | |
146 size_t repeats_; | |
147 SharedMemSynchronizer* synchronizer_; | |
148 DISALLOW_COPY_AND_ASSIGN(MultiSynchronizerWorkerFlagArray); | |
149 }; | |
150 | |
151 } // end namespace | |
152 | |
153 TEST(SharedMemSynchronizer, FlagArray) { | |
154 FlagArray flags; | |
155 EXPECT_GT(flags.size(), 1000U); | |
156 for (size_t i = 0; i < flags.size(); ++i) { | |
157 EXPECT_FALSE(flags.is_set(i)); | |
158 flags.set(i); | |
159 EXPECT_TRUE(flags.is_set(i)); | |
160 flags.clear(i); | |
161 EXPECT_FALSE(flags.is_set(i)); | |
162 } | |
163 } | |
164 | |
165 // Initializes two synchronizers, signals each one, and makes sure the | |
166 // other's wait is satisfied. | |
167 TEST(SharedMemSynchronizer, Basic) { | |
168 SharedMemSynchronizer a, b; | |
169 ASSERT_TRUE(SharedMemSynchronizer::InitializePair(&a, &b)); | |
170 EXPECT_TRUE(a.IsValid()); | |
171 EXPECT_TRUE(b.IsValid()); | |
172 | |
173 a.Signal(); | |
174 b.Wait(); | |
175 | |
176 b.Signal(); | |
177 a.Wait(); | |
178 } | |
179 | |
180 // Spins up two worker threads, each with its own SharedMemSynchronizer that | |
181 // it uses to read from and write to a shared memory buffer. | |
182 TEST(SharedMemSynchronizer, TwoThreads) { | |
183 SharedMemSynchronizer a, b; | |
184 ASSERT_TRUE(SharedMemSynchronizer::InitializePair(&a, &b)); | |
185 | |
186 size_t data = 0; | |
187 const size_t kRepeats = 10000; | |
188 SingleSynchronizerWorker worker_1(&data, kRepeats, &a); | |
189 SingleSynchronizerWorker worker_2(&data, kRepeats, &b); | |
190 base::PlatformThreadHandle thread_1, thread_2; | |
191 base::PlatformThread::Create(0, &worker_1, &thread_1); | |
192 base::PlatformThread::Create(0, &worker_2, &thread_2); | |
193 | |
194 // Release the first worker from its Wait(). The threads should then ping-pong, | |
195 // taking turns incrementing the shared variable, never stepping on each other's toes. | |
196 a.Signal(); | |
197 | |
198 base::PlatformThread::Join(thread_1); | |
199 base::PlatformThread::Join(thread_2); | |
200 | |
201 EXPECT_EQ(kRepeats * 2, data); | |
202 } | |
203 | |
204 // Uses a pair of threads to access up to 1000 pieces of synchronized shared | |
205 // data. On regular dev machines, the number of synchronizers should be 1000, | |
206 // but on mac and linux bots, the number will be smaller due to the | |
207 // RLIMIT_NOFILE limit. Specifically, linux will have this limit at 1024 which | |
208 // means for this test that the max number of synchronizers will be in the | |
209 // range 500-512. On Mac the limit is 256, so |count| will be ~120. Oh, and | |
210 // raising the limit via setrlimit() won't work. | |
211 TEST(SharedMemSynchronizer, ThousandSynchronizersTwoThreads) { | |
212 const size_t kCount = 1000; | |
213 SharedMemSynchronizer a[kCount], b[kCount]; | |
214 size_t count = 0; | |
215 for (size_t i = 0; i < kCount; ++i) { | |
216 if (!SharedMemSynchronizer::InitializePair(&a[i], &b[i])) { | |
217 LOG(WARNING) << "SharedMemSynchronizer::InitializePair failed at " << i; | |
218 break; | |
219 } | |
220 ++count; | |
221 } | |
222 | |
223 size_t data[kCount] = {0}; | |
224 // We use a multiple of the count so that the division in the check below | |
225 // will be nice and round. | |
226 size_t repeats = count * 1; | |
227 | |
228 SharedMemSynchronizer::SynchronizerVector a_vector, b_vector; | |
229 PopulateVectorFromStackArray(&a_vector, &a[0], count); | |
230 PopulateVectorFromStackArray(&b_vector, &b[0], count); | |
231 | |
232 MultiSynchronizerWorker worker_1(&data[0], repeats, &a_vector); | |
233 MultiSynchronizerWorker worker_2(&data[0], repeats, &b_vector); | |
234 base::PlatformThreadHandle thread_1, thread_2; | |
235 base::PlatformThread::Create(0, &worker_1, &thread_1); | |
236 base::PlatformThread::Create(0, &worker_2, &thread_2); | |
237 | |
238 for (size_t i = 0; i < count; ++i) | |
239 a[i].Signal(); | |
240 | |
241 base::PlatformThread::Join(thread_1); | |
242 base::PlatformThread::Join(thread_2); | |
243 | |
244 size_t expected_total = count * 2; | |
245 size_t total = 0; | |
246 for (size_t i = 0; i < count; ++i) { | |
247 // The SharedMemSynchronizer::WaitForMultiple class should have ensured that | |
248 // all synchronizers had the same quality of service. | |
249 EXPECT_EQ(expected_total / count, data[i]); | |
250 total += data[i]; | |
251 } | |
252 EXPECT_EQ(expected_total, total); | |
253 } | |
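The RLIMIT_NOFILE ceiling referenced in the comment above this test can be
inspected directly; a minimal POSIX-only sketch (illustrative only, not part of
this CL) using getrlimit():

  #include <stdio.h>
  #include <sys/resource.h>

  int main() {
    struct rlimit limit;
    // Query the per-process open file descriptor limit that bounds how many
    // synchronizer pairs the test above can actually create.
    if (getrlimit(RLIMIT_NOFILE, &limit) == 0) {
      printf("soft fd limit: %llu, hard fd limit: %llu\n",
             static_cast<unsigned long long>(limit.rlim_cur),
             static_cast<unsigned long long>(limit.rlim_max));
    }
    return 0;
  }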
254 | |
255 // Functionally equivalent (as far as the shared data goes) to the | |
256 // ThousandSynchronizersTwoThreads test but uses a single pair of | |
257 // synchronizers + FlagArray for the 1000 signals. | |
258 // This approach is significantly faster. | |
259 TEST(SharedMemSynchronizer, TwoSynchronizersTwoThreads1000Signals) { | |
260 SharedMemSynchronizer a, b; | |
261 ASSERT_TRUE(SharedMemSynchronizer::InitializePair(&a, &b)); | |
262 | |
263 const size_t kCount = 1000; | |
264 FlagArray signals; | |
265 ASSERT_GE(signals.size(), kCount); | |
266 size_t data[kCount] = {0}; | |
267 | |
268 // Since this algorithm checks all events each time the synchronizer is | |
269 // signaled, |repeats| doesn't mean the same thing here as it does in | |
270 // ThousandSynchronizersTwoThreads. One repeat here is equivalent to kCount | |
271 // repeats in ThousandSynchronizersTwoThreads. | |
272 size_t repeats = 1; | |
273 MultiSynchronizerWorkerFlagArray worker_1( | |
274 kCount, &signals, &data[0], repeats, &a); | |
275 MultiSynchronizerWorkerFlagArray worker_2( | |
276 kCount, &signals, &data[0], repeats, &b); | |
277 base::PlatformThreadHandle thread_1, thread_2; | |
278 base::PlatformThread::Create(0, &worker_1, &thread_1); | |
279 base::PlatformThread::Create(0, &worker_2, &thread_2); | |
280 | |
281 for (size_t i = 0; i < kCount; ++i) | |
282 signals.set(i); | |
283 a.Signal(); | |
284 | |
285 base::PlatformThread::Join(thread_1); | |
286 base::PlatformThread::Join(thread_2); | |
287 | |
288 size_t expected_total = kCount * 2; | |
289 size_t total = 0; | |
290 for (size_t i = 0; i < kCount; ++i) { | |
291 // Since each wakeup processes all signaled events, the shared data | |
292 // variables should all be equal. | |
293 EXPECT_EQ(expected_total / kCount, data[i]); | |
294 total += data[i]; | |
295 } | |
296 EXPECT_EQ(expected_total, total); | |
297 } | |
298 | |
299 // Test the maximum number of synchronizers without spinning further wait | |
300 // threads on Windows. This test assumes we can always create 64 pairs and | |
301 // bails if we can't. | |
302 TEST(SharedMemSynchronizer, MultipleWaits64) { | |
303 const size_t kCount = 64; | |
304 SharedMemSynchronizer a[kCount], b[kCount]; | |
305 for (size_t i = 0; i < kCount; ++i) { | |
306 ASSERT_TRUE(SharedMemSynchronizer::InitializePair(&a[i], &b[i])); | |
307 } | |
308 | |
309 SharedMemSynchronizer::SynchronizerVector b_vector; | |
310 PopulateVectorFromStackArray(&b_vector, &b[0], kCount); | |
311 SharedMemSynchronizer::WaitForMultiple waiter(&b_vector); | |
312 for (size_t i = 0; i < kCount; ++i) { | |
313 a[i].Signal(); | |
314 int index = waiter.Wait(); | |
315 EXPECT_EQ(i, static_cast<size_t>(index)); | |
316 } | |
317 } | |
318 | |
319 // Tests waiting for more synchronizers than the OS supports on one thread. | |
320 // The test will create at most 1000 pairs, but on mac/linux bots the actual | |
321 // number will be lower. See comment about the RLIMIT_NOFILE limit above for | |
322 // more details. | |
323 TEST(SharedMemSynchronizer, MultipleWaits1000) { | |
324 // 1000 synchronizers require 16 wait threads on Windows, including the current | |
325 // one: WaitForMultipleObjects is capped at 64 handles and ceil(1000/64) == 16. | |
326 const size_t kCount = 1000; | |
327 SharedMemSynchronizer a[kCount], b[kCount]; | |
328 size_t count = 0; | |
329 for (size_t i = 0; i < kCount; ++i) { | |
330 if (!SharedMemSynchronizer::InitializePair(&a[i], &b[i])) { | |
331 LOG(WARNING) << "SharedMemSynchronizer::InitializePair failed at " << i; | |
332 break; | |
333 } | |
334 ++count; | |
335 } | |
336 | |
337 SharedMemSynchronizer::SynchronizerVector b_vector; | |
338 PopulateVectorFromStackArray(&b_vector, &b[0], count); | |
339 | |
340 for (size_t i = 0; i < count; ++i) { | |
341 a[i].Signal(); | |
342 // To disable the load distribution algorithm and force the extra worker | |
343 // thread(s) to catch the signaled event, we define the |waiter| inside | |
344 // the loop. | |
345 SharedMemSynchronizer::WaitForMultiple waiter(&b_vector); | |
346 int index = waiter.Wait(); | |
347 EXPECT_EQ(i, static_cast<size_t>(index)); | |
348 } | |
349 } | |
350 | |
351 class SharedMemSynchronizerMultiProcessTest : public base::MultiProcessTest { | |
352 public: | |
353 static const char kSharedMemName[]; | |
354 static const size_t kSharedMemSize = 1024; | |
355 | |
356 protected: | |
357 virtual void SetUp() OVERRIDE { | |
358 base::MultiProcessTest::SetUp(); | |
359 } | |
360 | |
361 virtual void TearDown() OVERRIDE { | |
362 base::MultiProcessTest::TearDown(); | |
363 } | |
364 }; | |
365 | |
366 // static | |
367 const char SharedMemSynchronizerMultiProcessTest::kSharedMemName[] = | |
368 "SharedMemSynchronizerMultiProcessTest"; | |
369 | |
370 namespace { | |
371 // A very crude IPC mechanism that we use to coordinate setup between the | |
372 // spawned child process and the parent process. | |
373 struct CrudeIpc { | |
374 uint8 ready; | |
375 SharedMemSynchronizer::IPCHandle handle_1; | |
376 SharedMemSynchronizer::IPCHandle handle_2; | |
377 }; | |
378 } // end namespace | |
379 | |
380 // The main routine of the child process. Waits for the parent process | |
381 // to copy handles over to the child and then uses a SharedMemSynchronizer to | |
382 // wait and signal to the parent process. | |
383 MULTIPROCESS_TEST_MAIN(SharedMemSynchronizerChildMain) { | |
384 base::SharedMemory mem; | |
385 bool ok = mem.CreateNamed( | |
386 SharedMemSynchronizerMultiProcessTest::kSharedMemName, | |
387 true, | |
388 SharedMemSynchronizerMultiProcessTest::kSharedMemSize); | |
389 DCHECK(ok); | |
390 if (!ok) { | |
391 LOG(ERROR) << "Failed to open shared memory segment."; | |
392 return -1; | |
393 } | |
394 | |
395 mem.Map(SharedMemSynchronizerMultiProcessTest::kSharedMemSize); | |
396 CrudeIpc* ipc = reinterpret_cast<CrudeIpc*>(mem.memory()); | |
397 | |
398 while (!ipc->ready) | |
399 base::PlatformThread::Sleep(10); | |
400 | |
401 SharedMemSynchronizer synchronizer(ipc->handle_1, ipc->handle_2); | |
402 synchronizer.Wait(); | |
403 synchronizer.Signal(); | |
404 | |
405 return 0; | |
406 } | |
407 | |
408 // Spawns a new process and hands a SharedMemSynchronizer instance to the | |
409 // new process. Once that's done, it waits for the child process to signal | |
410 // its end and quits. | |
411 TEST_F(SharedMemSynchronizerMultiProcessTest, Basic) { | |
412 base::SharedMemory mem; | |
413 mem.Delete(kSharedMemName); // In case a previous run was unsuccessful. | |
414 bool ok = mem.CreateNamed(kSharedMemName, false, kSharedMemSize); | |
415 ASSERT_TRUE(ok); | |
416 | |
417 ASSERT_TRUE(mem.Map(kSharedMemSize)); | |
418 | |
419 SharedMemSynchronizer a, b; | |
420 ASSERT_TRUE(SharedMemSynchronizer::InitializePair(&a, &b)); | |
421 EXPECT_TRUE(a.IsValid()); | |
422 EXPECT_TRUE(b.IsValid()); | |
423 | |
424 CrudeIpc* ipc = reinterpret_cast<CrudeIpc*>(mem.memory()); | |
425 ipc->ready = false; | |
426 | |
427 #if defined(OS_POSIX) | |
428 const int kPosixChildSocket = 20; | |
429 EXPECT_TRUE(b.ShareToProcess(NULL, &ipc->handle_1, &ipc->handle_2)); | |
430 base::FileHandleMappingVector fd_mapping_vec; | |
431 fd_mapping_vec.push_back(std::pair<int, int>(ipc->handle_1.fd, | |
432 kPosixChildSocket)); | |
433 ipc->handle_1.fd = kPosixChildSocket; | |
434 base::ProcessHandle process = SpawnChild("SharedMemSynchronizerChildMain", | |
435 fd_mapping_vec, false); | |
436 #else | |
437 base::ProcessHandle process = SpawnChild("SharedMemSynchronizerChildMain", | |
438 false); | |
439 EXPECT_TRUE(b.ShareToProcess(process, &ipc->handle_1, &ipc->handle_2)); | |
440 #endif | |
441 | |
442 ipc->ready = true; | |
443 | |
444 a.Signal(); | |
445 a.Wait(); | |
446 | |
447 int exit_code = -1; | |
448 base::WaitForExitCode(process, &exit_code); | |
449 EXPECT_EQ(0, exit_code); | |
450 } | |