Index: media/filters/audio_renderer_algorithm_base_unittest.cc |
diff --git a/media/filters/audio_renderer_algorithm_base_unittest.cc b/media/filters/audio_renderer_algorithm_base_unittest.cc |
index 75d8c52cd3be9819fe6d76d269eb9eec81ac5bfc..77c7761cdc6044c61ae97f6d4d71d501ee8277d4 100644 |
--- a/media/filters/audio_renderer_algorithm_base_unittest.cc |
+++ b/media/filters/audio_renderer_algorithm_base_unittest.cc |
@@ -8,136 +8,280 @@ |
// correct rate. We always pass in a very large destination buffer with the |
// expectation that FillBuffer() will fill as much as it can but no more. |
+#include <algorithm> |
+#include <cmath> |
+ |
#include "base/bind.h" |
#include "base/callback.h" |
#include "media/base/data_buffer.h" |
#include "media/filters/audio_renderer_algorithm_base.h" |
-#include "testing/gmock/include/gmock/gmock.h" |
#include "testing/gtest/include/gtest/gtest.h" |
-using ::testing::AnyNumber; |
+static const size_t kRawDataSize = 10 * 1024; |
+static const int kSamplesPerSecond = 44100; |
+static const int kDefaultChannels = 2; |
+static const int kDefaultSampleBits = 16; |
namespace media { |
-static const int kChannels = 1; |
-static const int kSampleRate = 1000; |
-static const int kSampleBits = 8; |
- |
-TEST(AudioRendererAlgorithmBaseTest, FillBuffer_NormalRate) { |
- // When playback rate == 1.0f: straight copy of whatever is in |queue_|. |
- AudioRendererAlgorithmBase algorithm; |
- algorithm.Initialize(kChannels, kSampleRate, kSampleBits, 1.0f, |
- base::Bind(&base::DoNothing)); |
- |
- // Enqueue a buffer of any size since it doesn't matter. |
- const size_t kDataSize = 1024; |
- algorithm.EnqueueBuffer(new DataBuffer( |
- scoped_array<uint8>(new uint8[kDataSize]), kDataSize)); |
- EXPECT_EQ(kDataSize, algorithm.bytes_buffered()); |
- |
- // Read the same sized amount. |
- scoped_array<uint8> data(new uint8[kDataSize]); |
- EXPECT_EQ(kDataSize, algorithm.FillBuffer(data.get(), kDataSize)); |
- EXPECT_EQ(0u, algorithm.bytes_buffered()); |
-} |
+class AudioRendererAlgorithmBaseTest : public testing::Test { |
+ public: |
+ AudioRendererAlgorithmBaseTest() |
+ : bytes_enqueued_(0) { |
+ } |
+ |
+ ~AudioRendererAlgorithmBaseTest() {} |
-TEST(AudioRendererAlgorithmBaseTest, FillBuffer_DoubleRate) { |
- // When playback rate > 1.0f: input is read faster than output is written. |
- AudioRendererAlgorithmBase algorithm; |
- algorithm.Initialize(kChannels, kSampleRate, kSampleBits, 2.0f, |
- base::Bind(&base::DoNothing)); |
- |
- // First parameter is the input buffer size, second parameter is how much data |
- // we expect to consume in order to have no data left in the |algorithm|. |
- // |
- // For rate == 0.5f, reading half the input size should consume all enqueued |
- // data. |
- const size_t kBufferSize = 16 * 1024; |
- scoped_array<uint8> data(new uint8[kBufferSize]); |
- const size_t kTestData[][2] = { |
- { algorithm.window_size(), algorithm.window_size() / 2}, |
- { algorithm.window_size() / 2, algorithm.window_size() / 4}, |
- { 4u, 2u }, |
- { 0u, 0u }, |
- }; |
- |
- for (size_t i = 0u; i < arraysize(kTestData); ++i) { |
- const size_t kDataSize = kTestData[i][0]; |
- algorithm.EnqueueBuffer(new DataBuffer( |
- scoped_array<uint8>(new uint8[kDataSize]), kDataSize)); |
- EXPECT_EQ(kDataSize, algorithm.bytes_buffered()); |
- |
- const size_t kExpectedSize = kTestData[i][1]; |
- ASSERT_LE(kExpectedSize, kBufferSize); |
- EXPECT_EQ(kExpectedSize, algorithm.FillBuffer(data.get(), kBufferSize)); |
- EXPECT_EQ(0u, algorithm.bytes_buffered()); |
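+ |
+  // Initializes |algorithm_| with the default channel count and bit depth. |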
+  void Initialize() { |
+ Initialize(kDefaultChannels, kDefaultSampleBits); |
} |
-} |
-TEST(AudioRendererAlgorithmBaseTest, FillBuffer_HalfRate) { |
- // When playback rate < 1.0f: input is read slower than output is written. |
- AudioRendererAlgorithmBase algorithm; |
- algorithm.Initialize(kChannels, kSampleRate, kSampleBits, 0.5f, |
- base::Bind(&base::DoNothing)); |
- |
- // First parameter is the input buffer size, second parameter is how much data |
- // we expect to consume in order to have no data left in the |algorithm|. |
- // |
- // For rate == 0.5f, reading double the input size should consume all enqueued |
- // data. |
- const size_t kBufferSize = 16 * 1024; |
- scoped_array<uint8> data(new uint8[kBufferSize]); |
- const size_t kTestData[][2] = { |
- { algorithm.window_size(), algorithm.window_size() * 2 }, |
- { algorithm.window_size() / 2, algorithm.window_size() }, |
- { 2u, 4u }, |
- { 0u, 0u }, |
- }; |
- |
- for (size_t i = 0u; i < arraysize(kTestData); ++i) { |
- const size_t kDataSize = kTestData[i][0]; |
- algorithm.EnqueueBuffer(new DataBuffer( |
- scoped_array<uint8>(new uint8[kDataSize]), kDataSize)); |
- EXPECT_EQ(kDataSize, algorithm.bytes_buffered()); |
- |
- const size_t kExpectedSize = kTestData[i][1]; |
- ASSERT_LE(kExpectedSize, kBufferSize); |
- EXPECT_EQ(kExpectedSize, algorithm.FillBuffer(data.get(), kBufferSize)); |
- EXPECT_EQ(0u, algorithm.bytes_buffered()); |
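+ |
+  // Initializes |algorithm_| at 1.0x playback rate, binding EnqueueData() as |
+  // its callback, and primes the queue with an initial buffer of fake data. |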
+ void Initialize(int channels, int bits_per_channel) { |
+ algorithm_.Initialize( |
+ channels, kSamplesPerSecond, bits_per_channel, 1.0f, |
+ base::Bind(&AudioRendererAlgorithmBaseTest::EnqueueData, |
+ base::Unretained(this))); |
+ EnqueueData(); |
+ } |
+ |
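+  // Fills a buffer with non-zero fake samples and pushes it into |
+  // |algorithm_|, tracking the total number of bytes enqueued. |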
+ void EnqueueData() { |
+ scoped_array<uint8> audio_data(new uint8[kRawDataSize]); |
+ CHECK_EQ(kRawDataSize % algorithm_.bytes_per_channel(), 0u); |
+ CHECK_EQ(kRawDataSize % algorithm_.bytes_per_frame(), 0u); |
+ size_t length = kRawDataSize / algorithm_.bytes_per_channel(); |
+ switch (algorithm_.bytes_per_channel()) { |
+ case 4: |
+ WriteFakeData<int32>(audio_data.get(), length); |
+ break; |
+ case 2: |
+ WriteFakeData<int16>(audio_data.get(), length); |
+ break; |
+ case 1: |
+ WriteFakeData<uint8>(audio_data.get(), length); |
+ break; |
+ default: |
+ NOTREACHED() << "Unsupported audio bit depth in crossfade."; |
+ } |
+ algorithm_.EnqueueBuffer(new DataBuffer(audio_data.Pass(), kRawDataSize)); |
+ bytes_enqueued_ += kRawDataSize; |
+ } |
+ |
+ template <class Type> |
+ void WriteFakeData(uint8* audio_data, size_t length) { |
+ Type* output = reinterpret_cast<Type*>(audio_data); |
+ for (size_t i = 0; i < length; i++) { |
+ // The value of the data is meaningless; we just want non-zero data to |
+ // differentiate it from muted data. |
+ output[i] = i % 5 + 10; |
+ } |
+ } |
+ |
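+  // Verifies that the data written by FillBuffer() is non-zero unless the |
+  // algorithm reports that it is muted. |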
+ void CheckFakeData(uint8* audio_data, int frames_written, |
+ double playback_rate) { |
+ size_t length = |
+ (frames_written * algorithm_.bytes_per_frame()) |
+ / algorithm_.bytes_per_channel(); |
+ |
+ switch (algorithm_.bytes_per_channel()) { |
+ case 4: |
+ DoCheckFakeData<int32>(audio_data, length); |
+ break; |
+ case 2: |
+ DoCheckFakeData<int16>(audio_data, length); |
+ break; |
+ case 1: |
+ DoCheckFakeData<uint8>(audio_data, length); |
+ break; |
+ default: |
+ NOTREACHED() << "Unsupported audio bit depth in crossfade."; |
+ } |
+ } |
+ |
+ template <class Type> |
+ void DoCheckFakeData(uint8* audio_data, size_t length) { |
+ Type* output = reinterpret_cast<Type*>(audio_data); |
+ for (size_t i = 0; i < length; i++) { |
+ EXPECT_TRUE(algorithm_.is_muted() || output[i] != 0); |
+ } |
} |
-} |
-TEST(AudioRendererAlgorithmBaseTest, FillBuffer_QuarterRate) { |
- // When playback rate is very low the audio is simply muted. |
- AudioRendererAlgorithmBase algorithm; |
- algorithm.Initialize(kChannels, kSampleRate, kSampleBits, 0.25f, |
- base::Bind(&base::DoNothing)); |
- |
- // First parameter is the input buffer size, second parameter is how much data |
- // we expect to consume in order to have no data left in the |algorithm|. |
- // |
- // For rate == 0.25f, reading four times the input size should consume all |
- // enqueued data but without executing OLA. |
- const size_t kBufferSize = 16 * 1024; |
- scoped_array<uint8> data(new uint8[kBufferSize]); |
- const size_t kTestData[][2] = { |
- { algorithm.window_size(), algorithm.window_size() * 4}, |
- { algorithm.window_size() / 2, algorithm.window_size() * 2}, |
- { 1u, 4u }, |
- { 0u, 0u }, |
- }; |
- |
- for (size_t i = 0u; i < arraysize(kTestData); ++i) { |
- const size_t kDataSize = kTestData[i][0]; |
- algorithm.EnqueueBuffer(new DataBuffer(scoped_array<uint8>( |
- new uint8[kDataSize]), kDataSize)); |
- EXPECT_EQ(kDataSize, algorithm.bytes_buffered()); |
- |
- const size_t kExpectedSize = kTestData[i][1]; |
- ASSERT_LE(kExpectedSize, kBufferSize); |
- EXPECT_EQ(kExpectedSize, algorithm.FillBuffer(data.get(), kBufferSize)); |
- EXPECT_EQ(0u, algorithm.bytes_buffered()); |
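+ |
+  // Returns how many bytes of input |algorithm_| has consumed since the given |
+  // snapshots of bytes enqueued and bytes buffered were taken. |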
+ int ComputeConsumedBytes(int initial_bytes_enqueued, |
+ int initial_bytes_buffered) { |
+ int byte_delta = bytes_enqueued_ - initial_bytes_enqueued; |
+ int buffered_delta = algorithm_.bytes_buffered() - initial_bytes_buffered; |
+ int consumed = byte_delta - buffered_delta; |
+ CHECK_GE(consumed, 0); |
+ return consumed; |
} |
+ |
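+  // Calls FillBuffer() repeatedly at |playback_rate|, checks the output data, |
+  // and verifies that the ratio of bytes consumed to bytes requested stays |
+  // close to the requested rate. |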
+ void TestPlaybackRate(double playback_rate) { |
+ static const int kDefaultBufferSize = kSamplesPerSecond / 10; |
+ static const int kDefaultFramesRequested = 5 * kSamplesPerSecond; |
+ |
+ TestPlaybackRate(playback_rate, kDefaultBufferSize, |
+ kDefaultFramesRequested); |
+ } |
+ |
+ void TestPlaybackRate(double playback_rate, |
+ int buffer_size_in_frames, |
+ int total_frames_requested) { |
+ int initial_bytes_enqueued = bytes_enqueued_; |
+ int initial_bytes_buffered = algorithm_.bytes_buffered(); |
+ |
+ algorithm_.SetPlaybackRate(static_cast<float>(playback_rate)); |
+ |
+ scoped_array<uint8> buffer( |
+ new uint8[buffer_size_in_frames * algorithm_.bytes_per_frame()]); |
+ |
+ if (playback_rate == 0.0) { |
+ int frames_written = |
+ algorithm_.FillBuffer(buffer.get(), buffer_size_in_frames); |
+ EXPECT_EQ(0, frames_written); |
+ return; |
+ } |
+ |
+ int frames_remaining = total_frames_requested; |
+ while (frames_remaining > 0) { |
+ int frames_requested = std::min(buffer_size_in_frames, frames_remaining); |
+ int frames_written = |
+ algorithm_.FillBuffer(buffer.get(), frames_requested); |
+ CHECK_GT(frames_written, 0); |
+ CheckFakeData(buffer.get(), frames_written, playback_rate); |
+ frames_remaining -= frames_written; |
+ } |
+ |
+ int bytes_requested = total_frames_requested * algorithm_.bytes_per_frame(); |
+ int bytes_consumed = ComputeConsumedBytes(initial_bytes_enqueued, |
+ initial_bytes_buffered); |
+ |
+    // When playing back at normal speed, we should consume exactly the number |
+    // of bytes requested. |
+ if (playback_rate == 1.0) { |
+ EXPECT_EQ(bytes_requested, bytes_consumed); |
+ return; |
+ } |
+ |
+    // Otherwise, allow up to |kMaxAcceptableDelta| relative difference between |
+    // the target and actual playback rate. When |kSamplesPerSecond| and |
+    // |total_frames_requested| are reasonably large, one can expect less than |
+    // a 1% difference in most cases. In our current implementation, sped-up |
+    // playback is less accurate than slowed-down playback, and for |
+    // playback_rate > 1 the accuracy generally degrades the farther the rate |
+    // drifts from 1 (though not linearly). |
+ static const double kMaxAcceptableDelta = 0.01; |
+ double actual_playback_rate = 1.0 * bytes_consumed / bytes_requested; |
+ |
+    // Compute the difference between the actual and target |playback_rate| as |
+    // a fraction of the target rate. |
+ double delta = std::abs(1.0 - (actual_playback_rate / playback_rate)); |
+ |
+ EXPECT_LE(delta, kMaxAcceptableDelta); |
+ } |
+ |
+ protected: |
+ AudioRendererAlgorithmBase algorithm_; |
+ int bytes_enqueued_; |
+}; |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_NormalRate) { |
+ Initialize(); |
+ TestPlaybackRate(1.0); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_OneAndAQuarterRate) { |
+ Initialize(); |
+ TestPlaybackRate(1.25); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_OneAndAHalfRate) { |
+ Initialize(); |
+ TestPlaybackRate(1.5); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_DoubleRate) { |
+ Initialize(); |
+ TestPlaybackRate(2.0); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_EightTimesRate) { |
+ Initialize(); |
+ TestPlaybackRate(8.0); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_ThreeQuartersRate) { |
+ Initialize(); |
+ TestPlaybackRate(0.75); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_HalfRate) { |
+ Initialize(); |
+ TestPlaybackRate(0.5); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_QuarterRate) { |
+ Initialize(); |
+ TestPlaybackRate(0.25); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_Pause) { |
+ Initialize(); |
+ TestPlaybackRate(0.0); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_SlowDown) { |
+ Initialize(); |
+ TestPlaybackRate(4.5); |
+ TestPlaybackRate(3.0); |
+ TestPlaybackRate(2.0); |
+ TestPlaybackRate(1.0); |
+ TestPlaybackRate(0.5); |
+ TestPlaybackRate(0.25); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_SpeedUp) { |
+ Initialize(); |
+ TestPlaybackRate(0.25); |
+ TestPlaybackRate(0.5); |
+ TestPlaybackRate(1.0); |
+ TestPlaybackRate(2.0); |
+ TestPlaybackRate(3.0); |
+ TestPlaybackRate(4.5); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_JumpAroundSpeeds) { |
+ Initialize(); |
+ TestPlaybackRate(2.1); |
+ TestPlaybackRate(0.9); |
+ TestPlaybackRate(0.6); |
+ TestPlaybackRate(1.4); |
+ TestPlaybackRate(0.3); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_SmallBufferSize) { |
+ Initialize(); |
+ static const int kBufferSizeInFrames = 1; |
+ static const int kFramesRequested = 2 * kSamplesPerSecond; |
+ TestPlaybackRate(1.0, kBufferSizeInFrames, kFramesRequested); |
+ TestPlaybackRate(0.5, kBufferSizeInFrames, kFramesRequested); |
+ TestPlaybackRate(1.5, kBufferSizeInFrames, kFramesRequested); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_LowerQualityAudio) { |
+ static const int kChannels = 1; |
+ static const int kSampleBits = 8; |
+ Initialize(kChannels, kSampleBits); |
+ TestPlaybackRate(1.0); |
+ TestPlaybackRate(0.5); |
+ TestPlaybackRate(1.5); |
+} |
+ |
+TEST_F(AudioRendererAlgorithmBaseTest, FillBuffer_HigherQualityAudio) { |
+ static const int kChannels = 2; |
+ static const int kSampleBits = 32; |
+ Initialize(kChannels, kSampleBits); |
+ TestPlaybackRate(1.0); |
+ TestPlaybackRate(0.5); |
+ TestPlaybackRate(1.5); |
} |
} // namespace media |