Chromium Code Reviews — review of: content/renderer/media/audio_track_recorder_unittest.cc
diff --git a/content/renderer/media/audio_track_recorder_unittest.cc b/content/renderer/media/audio_track_recorder_unittest.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a7435bc5e1b183f1527193bb2fc32bd140a30978
--- /dev/null
+++ b/content/renderer/media/audio_track_recorder_unittest.cc
@@ -0,0 +1,174 @@
| +// Copyright 2015 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "content/renderer/media/audio_track_recorder.h" |
| + |
| +#include "base/run_loop.h" |
| +#include "base/strings/utf_string_conversions.h" |
| +#include "content/renderer/media/media_stream_audio_source.h" |
| +#include "content/renderer/media/mock_media_constraint_factory.h" |
| +#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h" |
| +#include "content/renderer/media/webrtc_local_audio_track.h" |
| +#include "media/audio/simple_sources.h" |
| +#include "testing/gmock/include/gmock/gmock.h" |
| +#include "testing/gtest/include/gtest/gtest.h" |
| + |
using ::testing::_;
// NOTE(review): DoAll, Return and SaveArg are not used in the visible tests
// (only referenced by a TODO below) — confirm before removing.
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Mock;
using ::testing::Return;
using ::testing::SaveArg;

namespace {

// Input audio format.
const media::AudioParameters::Format kInputFormat =
    media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
const int kNumChannels = 1;        // Mono input.
const int kBitsPerSample = 16;     // 16-bit PCM samples.
const int kSamplingRate = 48000;   // Sampling rate, in Hz.
const int kFramesPerBuffer = 480;  // 10 ms of audio at 48 kHz.

}  // namespace
| + |
| +namespace content { |
| + |
// GMock action that runs the bound |closure| when an expectation fires; used
// below to quit the test RunLoop once the final encoded chunk is delivered.
ACTION_P(RunClosure, closure) {
  closure.Run();
}
| + |
| +class EncodedAudioHandlerInterface { |
| + public: |
| + virtual void OnEncodedAudio(const media::AudioParameters& params, |
| + scoped_ptr<std::string> encoded_data, |
| + base::TimeTicks timestamp) = 0; |
| + virtual ~EncodedAudioHandlerInterface() {} |
| +}; |
| + |
| +class AudioTrackRecorderTest : public testing::Test, |
| + public EncodedAudioHandlerInterface { |
|
mcasas
2015/10/19 20:02:09
Actually you don't need to define this interface/
ajose
2015/10/20 03:21:12
Done.
|
| + public: |
| + AudioTrackRecorderTest() |
| + : params1_(kInputFormat, |
| + media::CHANNEL_LAYOUT_MONO, |
| + kSamplingRate, |
| + kBitsPerSample, |
| + kFramesPerBuffer), |
| + params2_(kInputFormat, |
| + media::CHANNEL_LAYOUT_STEREO, |
| + kSamplingRate, |
| + kBitsPerSample, |
| + kFramesPerBuffer), |
| + source_(kNumChannels, 440, kSamplingRate) { |
| + PrepareBlinkTrackOfType(MEDIA_DEVICE_AUDIO_CAPTURE); |
| + audio_track_recorder_.reset(new AudioTrackRecorder( |
| + blink_track_, base::Bind(&AudioTrackRecorderTest::OnEncodedAudio, |
| + base::Unretained(this)))); |
| + } |
| + |
| + scoped_ptr<media::AudioBus> NextAudioBus(const base::TimeDelta& duration) { |
| + const int num_samples = static_cast<int>((kSamplingRate * duration) / |
| + base::TimeDelta::FromSeconds(1)); |
| + scoped_ptr<media::AudioBus> bus( |
| + media::AudioBus::Create(kNumChannels, num_samples)); |
| + source_.OnMoreData(bus.get(), 0); |
| + return bus.Pass(); |
| + } |
| + |
| + MOCK_METHOD3(DoOnEncodedAudio, |
| + void(const media::AudioParameters& params, |
| + std::string encoded_data, |
| + base::TimeTicks timestamp)); |
| + |
| + void OnEncodedAudio(const media::AudioParameters& params, |
| + scoped_ptr<std::string> encoded_data, |
| + base::TimeTicks timestamp) { |
| + EXPECT_TRUE(!encoded_data->empty()); |
| + DoOnEncodedAudio(params, *encoded_data, timestamp); |
| + } |
| + |
| + const base::MessageLoop message_loop_; |
| + |
| + // ATR and WebMediaStreamTrack for fooling it. |
| + scoped_ptr<AudioTrackRecorder> audio_track_recorder_; |
| + blink::WebMediaStreamTrack blink_track_; |
| + |
| + // Two different sets of AudioParameters for testing re-init of ATR. |
| + media::AudioParameters params1_; |
| + media::AudioParameters params2_; |
| + |
| + // AudioSource for creating AudioBuses. |
| + media::SineWaveAudioSource source_; |
| + |
| + private: |
| + // Prepares a blink track of a given MediaStreamType and attaches the native |
| + // track, which can be used to capture audio data and pass it to the producer. |
| + // Taken from media::SpeechRecognitionAudioSinkTest |
| + void PrepareBlinkTrackOfType(const MediaStreamType device_type) { |
| + StreamDeviceInfo device_info(device_type, "Mock device", "mock_device_id"); |
| + MockMediaConstraintFactory constraint_factory; |
| + const blink::WebMediaConstraints constraints = |
| + constraint_factory.CreateWebMediaConstraints(); |
| + scoped_refptr<WebRtcAudioCapturer> capturer( |
| + WebRtcAudioCapturer::CreateCapturer(-1, device_info, constraints, NULL, |
| + NULL)); |
| + scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter( |
| + WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL)); |
| + scoped_ptr<WebRtcLocalAudioTrack> native_track( |
| + new WebRtcLocalAudioTrack(adapter.get(), capturer, NULL)); |
| + blink::WebMediaStreamSource blink_audio_source; |
| + blink_audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"), |
| + blink::WebMediaStreamSource::TypeAudio, |
| + base::UTF8ToUTF16("dummy_source_name"), |
| + false /* remote */, true /* readonly */); |
| + MediaStreamSource::SourceStoppedCallback cb; |
| + blink_audio_source.setExtraData( |
| + new MediaStreamAudioSource(-1, device_info, cb, NULL)); |
| + blink_track_.initialize(blink::WebString::fromUTF8("dummy_track"), |
| + blink_audio_source); |
| + blink_track_.setExtraData(native_track.release()); |
| + } |
| + |
| + DISALLOW_COPY_AND_ASSIGN(AudioTrackRecorderTest); |
| +}; |
| + |
| +TEST_F(AudioTrackRecorderTest, OnSetFormat) { |
| + audio_track_recorder_->OnSetFormat(params1_); |
|
mcasas
2015/10/19 20:02:09
What's the point of this?
Suggestion: You can add
ajose
2015/10/20 03:21:12
Acknowledged.
|
| +} |
| + |
// Feeds three 10 ms audio buffers into the recorder and expects exactly one
// encoded-audio callback per buffer, in order, using a RunLoop to wait for
// the asynchronous callbacks.
TEST_F(AudioTrackRecorderTest, OnData) {
  audio_track_recorder_->OnSetFormat(params1_);
  InSequence s;
  base::RunLoop run_loop;
  base::Closure quit_closure = run_loop.QuitClosure();

  // TODO(ajose): consider adding WillOnce(SaveArg...) and inspecting, as done
  // in VTR unittests.
  // TODO(ajose): Using 10ms chunks due to hard-coded 100fps framerate.
  // Need to figure out what to do about framerate.
  // First buffer: the callback must carry the same |time1| passed to OnData().
  const base::TimeTicks time1 = base::TimeTicks::Now();
  EXPECT_CALL(*this, DoOnEncodedAudio(_, _, time1)).Times(1);
  audio_track_recorder_->OnData(
      *NextAudioBus(base::TimeDelta::FromMilliseconds(10)), time1);

  // Send more audio.
  const base::TimeTicks time2 = base::TimeTicks::Now();
  EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _)).Times(1);
  audio_track_recorder_->OnData(
      *NextAudioBus(base::TimeDelta::FromMilliseconds(10)), time2);

  // NOTE(review): the original comment said this buffer uses "different
  // params to force ATR to re-init", but params2_ is never applied here —
  // confirm intent. The RunClosure action quits the RunLoop once this final
  // expected callback fires.
  const base::TimeTicks time3 = base::TimeTicks::Now();
  EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _))
      .Times(1)
      .WillOnce(RunClosure(quit_closure));
  audio_track_recorder_->OnData(
      *NextAudioBus(base::TimeDelta::FromMilliseconds(10)), time3);

  // Block until the last callback arrives, then verify all expectations
  // (ordered by the InSequence above) were satisfied.
  run_loop.Run();
  Mock::VerifyAndClearExpectations(this);
}
| + |
| +} // namespace content |