Index: content/renderer/media/webrtc_audio_device_impl.cc
diff --git a/content/renderer/media/webrtc_audio_device_impl.cc b/content/renderer/media/webrtc_audio_device_impl.cc
index 1293674e14c2e0c2a4395694068430d71ab4d277..fefb10efb8b7f8ea5cb63bdf68425b405b3c1773 100644
--- a/content/renderer/media/webrtc_audio_device_impl.cc
+++ b/content/renderer/media/webrtc_audio_device_impl.cc
@@ -32,12 +32,6 @@ WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()
     : ref_count_(0),
       render_loop_(base::MessageLoopProxy::current()),
       audio_transport_callback_(NULL),
-      input_buffer_size_(0),
-      output_buffer_size_(0),
-      input_channels_(0),
-      output_channels_(0),
-      input_sample_rate_(0),
-      output_sample_rate_(0),
       input_delay_ms_(0),
       output_delay_ms_(0),
       last_error_(AudioDeviceModule::kAdmErrNone),
@@ -78,7 +72,7 @@ size_t WebRtcAudioDeviceImpl::Render(
     const std::vector<float*>& audio_data,
     size_t number_of_frames,
     size_t audio_delay_milliseconds) {
-  DCHECK_LE(number_of_frames, output_buffer_size_);
+  DCHECK_LE(number_of_frames, output_buffer_size());
 
   {
     base::AutoLock auto_lock(lock_);
@@ -87,9 +81,9 @@ size_t WebRtcAudioDeviceImpl::Render(
   }
 
   const int channels = audio_data.size();
-  DCHECK_LE(channels, output_channels_);
+  DCHECK_LE(channels, output_channels());
 
-  int samples_per_sec = static_cast<int>(output_sample_rate_);
+  int samples_per_sec = output_sample_rate();
   if (samples_per_sec == 44100) {
     // Even if the hardware runs at 44.1kHz, we use 44.0 internally.
     samples_per_sec = 44000;
@@ -142,7 +136,7 @@ void WebRtcAudioDeviceImpl::Capture(
     const std::vector<float*>& audio_data,
     size_t number_of_frames,
     size_t audio_delay_milliseconds) {
-  DCHECK_LE(number_of_frames, input_buffer_size_);
+  DCHECK_LE(number_of_frames, input_buffer_size());
 
   int output_delay_ms = 0;
   {
@@ -153,7 +147,7 @@ void WebRtcAudioDeviceImpl::Capture(
   }
 
   const int channels = audio_data.size();
-  DCHECK_LE(channels, input_channels_);
+  DCHECK_LE(channels, input_channels());
   uint32_t new_mic_level = 0;
 
   // Interleave, scale, and clip input to int16 and store result in
@@ -162,7 +156,7 @@ void WebRtcAudioDeviceImpl::Capture(
                                 input_buffer_.get(),
                                 number_of_frames);
 
-  int samples_per_sec = static_cast<int>(input_sample_rate_);
+  int samples_per_sec = input_sample_rate();
   if (samples_per_sec == 44100) {
     // Even if the hardware runs at 44.1kHz, we use 44.0 internally.
     samples_per_sec = 44000;
@@ -301,45 +295,44 @@ int32_t WebRtcAudioDeviceImpl::Init() {
 
   // Ask the browser for the default audio output hardware sample-rate.
   // This request is based on a synchronous IPC message.
-  int output_sample_rate =
-      static_cast<int>(audio_hardware::GetOutputSampleRate());
-  DVLOG(1) << "Audio output hardware sample rate: " << output_sample_rate;
+  int out_sample_rate = audio_hardware::GetOutputSampleRate();
+  DVLOG(1) << "Audio output hardware sample rate: " << out_sample_rate;
 
   // Verify that the reported output hardware sample rate is supported
   // on the current platform.
   if (std::find(&kValidOutputRates[0],
                 &kValidOutputRates[0] + arraysize(kValidOutputRates),
-                output_sample_rate) ==
+                out_sample_rate) ==
       &kValidOutputRates[arraysize(kValidOutputRates)]) {
-    DLOG(ERROR) << output_sample_rate << " is not a supported output rate.";
+    DLOG(ERROR) << out_sample_rate << " is not a supported output rate.";
     return -1;
   }
 
   // Ask the browser for the default audio input hardware sample-rate.
   // This request is based on a synchronous IPC message.
-  int input_sample_rate =
-      static_cast<int>(audio_hardware::GetInputSampleRate());
-  DVLOG(1) << "Audio input hardware sample rate: " << input_sample_rate;
+  int in_sample_rate = audio_hardware::GetInputSampleRate();
+  DVLOG(1) << "Audio input hardware sample rate: " << in_sample_rate;
 
   // Verify that the reported input hardware sample rate is supported
   // on the current platform.
   if (std::find(&kValidInputRates[0],
                 &kValidInputRates[0] + arraysize(kValidInputRates),
-                input_sample_rate) ==
+                in_sample_rate) ==
       &kValidInputRates[arraysize(kValidInputRates)]) {
-    DLOG(ERROR) << input_sample_rate << " is not a supported input rate.";
+    DLOG(ERROR) << in_sample_rate << " is not a supported input rate.";
     return -1;
   }
 
   // Ask the browser for the default number of audio input channels.
   // This request is based on a synchronous IPC message.
-  int input_channels = audio_hardware::GetInputChannelCount();
-  DVLOG(1) << "Audio input hardware channels: " << input_channels;
+  ChannelLayout input_channel_layout =
+      audio_hardware::GetInputChannelLayout();
+  DVLOG(1) << "Audio input hardware channels: " << input_channel_layout;
 
-  int output_channels = 0;
-
-  size_t input_buffer_size = 0;
-  size_t output_buffer_size = 0;
+  ChannelLayout out_channel_layout = CHANNEL_LAYOUT_MONO;
+  AudioParameters::Format in_format = AudioParameters::AUDIO_PCM_LINEAR;
+  size_t in_buffer_size = 0;
+  size_t out_buffer_size = 0;
 
   // TODO(henrika): factor out all platform specific parts in separate
   // functions. Code is a bit messy right now.
@@ -347,7 +340,10 @@ int32_t WebRtcAudioDeviceImpl::Init() {
 // Windows
 #if defined(OS_WIN)
   // Always use stereo rendering on Windows.
-  output_channels = 2;
+  out_channel_layout = CHANNEL_LAYOUT_STEREO;
+
+  DVLOG(1) << "Using AUDIO_PCM_LOW_LATENCY as input mode on Windows.";
+  in_format = AudioParameters::AUDIO_PCM_LOW_LATENCY;
 
   // Capture side: AUDIO_PCM_LOW_LATENCY is based on the Core Audio (WASAPI)
   // API which was introduced in Windows Vista. For lower Windows versions,
@@ -355,12 +351,12 @@ int32_t WebRtcAudioDeviceImpl::Init() {
   // size of 10ms works well for both these implementations.
 
   // Use different buffer sizes depending on the current hardware sample rate.
-  if (input_sample_rate == 44100) {
+  if (in_sample_rate == 44100) {
     // We do run at 44.1kHz at the actual audio layer, but ask for frames
     // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine.
-    input_buffer_size = 440;
+    in_buffer_size = 440;
   } else {
-    input_buffer_size = (input_sample_rate / 100);
+    in_buffer_size = (in_sample_rate / 100);
   }
 
   // Render side: AUDIO_PCM_LOW_LATENCY is based on the Core Audio (WASAPI)
@@ -369,39 +365,42 @@ int32_t WebRtcAudioDeviceImpl::Init() {
   // size of 10ms works well for WASAPI but 30ms is needed for Wave.
 
   // Use different buffer sizes depending on the current hardware sample rate.
-  if (output_sample_rate == 96000 || output_sample_rate == 48000) {
-    output_buffer_size = (output_sample_rate / 100);
+  if (out_sample_rate == 96000 || out_sample_rate == 48000) {
+    out_buffer_size = (out_sample_rate / 100);
   } else {
     // We do run at 44.1kHz at the actual audio layer, but ask for frames
     // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine.
     // TODO(henrika): figure out why we seem to need 20ms here for glitch-
     // free audio.
-    output_buffer_size = 2 * 440;
+    out_buffer_size = 2 * 440;
   }
 
   // Windows XP and lower can't cope with 10 ms output buffer size.
   // It must be extended to 30 ms (60 ms will be used internally by WaveOut).
   if (!media::IsWASAPISupported()) {
-    output_buffer_size = 3 * output_buffer_size;
+    out_buffer_size = 3 * out_buffer_size;
     DLOG(WARNING) << "Extending the output buffer size by a factor of three "
                   << "since Windows XP has been detected.";
   }
 
 // Mac OS X
 #elif defined(OS_MACOSX)
-  output_channels = 1;
+  out_channel_layout = CHANNEL_LAYOUT_MONO;
+
+  DVLOG(1) << "Using AUDIO_PCM_LOW_LATENCY as input mode on Mac OS X.";
+  in_format = AudioParameters::AUDIO_PCM_LOW_LATENCY;
 
   // Capture side: AUDIO_PCM_LOW_LATENCY on Mac OS X is based on a callback-
   // driven Core Audio implementation. Tests have shown that 10ms is a suitable
   // frame size to use, both for 48kHz and 44.1kHz.
 
   // Use different buffer sizes depending on the current hardware sample rate.
-  if (input_sample_rate == 44100) {
+  if (in_sample_rate == 44100) {
     // We do run at 44.1kHz at the actual audio layer, but ask for frames
     // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine.
-    input_buffer_size = 440;
+    in_buffer_size = 440;
   } else {
-    input_buffer_size = (input_sample_rate / 100);
+    in_buffer_size = (in_sample_rate / 100);
   }
 
   // Render side: AUDIO_PCM_LOW_LATENCY on Mac OS X is based on a callback-
@@ -409,25 +408,25 @@ int32_t WebRtcAudioDeviceImpl::Init() {
   // frame size to use, both for 48kHz and 44.1kHz.
 
   // Use different buffer sizes depending on the current hardware sample rate.
-  if (output_sample_rate == 48000) {
-    output_buffer_size = 480;
+  if (out_sample_rate == 48000) {
+    out_buffer_size = 480;
   } else {
     // We do run at 44.1kHz at the actual audio layer, but ask for frames
     // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine.
-    output_buffer_size = 440;
+    out_buffer_size = 440;
   }
 // Linux
 #elif defined(OS_LINUX) || defined(OS_OPENBSD)
-  input_channels = 2;
-  output_channels = 1;
+  input_channel_layout = CHANNEL_LAYOUT_STEREO;
+  out_channel_layout = CHANNEL_LAYOUT_MONO;
 
   // Based on tests using the current ALSA implementation in Chrome, we have
   // found that the best combination is 20ms on the input side and 10ms on the
   // output side.
   // TODO(henrika): It might be possible to reduce the input buffer
   // size and reduce the delay even more.
-  input_buffer_size = 2 * 480;
-  output_buffer_size = 480;
+  in_buffer_size = 2 * 480;
+  out_buffer_size = 480;
 #else
   DLOG(ERROR) << "Unsupported platform";
   return -1;
@@ -435,21 +434,20 @@ int32_t WebRtcAudioDeviceImpl::Init() {
 
   // Store utilized parameters to ensure that we can check them
   // after a successful initialization.
-  output_buffer_size_ = output_buffer_size;
-  output_channels_ = output_channels;
-  output_sample_rate_ = static_cast<double>(output_sample_rate);
+  output_audio_parameters_.Reset(
+      AudioParameters::AUDIO_PCM_LOW_LATENCY, out_channel_layout,
+      out_sample_rate, 16, out_buffer_size);
 
-  input_buffer_size_ = input_buffer_size;
-  input_channels_ = input_channels;
-  input_sample_rate_ = input_sample_rate;
+  input_audio_parameters_.Reset(
+      in_format, input_channel_layout, in_sample_rate,
+      16, in_buffer_size);
 
   // Create and configure the audio capturing client.
   audio_input_device_ = new AudioInputDevice(
-      input_buffer_size, input_channels, input_sample_rate, this, this);
+      input_audio_parameters_, this, this);
 
   // Create and configure the audio rendering client.
-  audio_output_device_ = new AudioDevice(
-      output_buffer_size, output_channels, output_sample_rate, this);
+  audio_output_device_ = new AudioDevice(output_audio_parameters_, this);
 
   DCHECK(audio_input_device_);
   DCHECK(audio_output_device_);
@@ -458,8 +456,8 @@ int32_t WebRtcAudioDeviceImpl::Init() {
   // It is assumed that each audio sample contains 16 bits and each
   // audio frame contains one or two audio samples depending on the
   // number of channels.
-  input_buffer_.reset(new int16[input_buffer_size * input_channels]);
-  output_buffer_.reset(new int16[output_buffer_size * output_channels]);
+  input_buffer_.reset(new int16[input_buffer_size() * input_channels()]);
+  output_buffer_.reset(new int16[output_buffer_size() * output_channels()]);
 
   DCHECK(input_buffer_.get());
   DCHECK(output_buffer_.get());
@@ -469,11 +467,11 @@ int32_t WebRtcAudioDeviceImpl::Init() {
   initialized_ = true;
 
   DVLOG(1) << "Capture parameters (size/channels/rate): ("
-           << input_buffer_size_ << "/" << input_channels_ << "/"
-           << input_sample_rate_ << ")";
+           << input_buffer_size() << "/" << input_channels() << "/"
+           << input_sample_rate() << ")";
   DVLOG(1) << "Render parameters (size/channels/rate): ("
-           << output_buffer_size_ << "/" << output_channels_ << "/"
-           << output_sample_rate_ << ")";
+           << output_buffer_size() << "/" << output_channels() << "/"
+           << output_sample_rate() << ")";
   return 0;
 }
 
@@ -851,7 +849,7 @@ int32_t WebRtcAudioDeviceImpl::MicrophoneBoost(bool* enabled) const {
 
 int32_t WebRtcAudioDeviceImpl::StereoPlayoutIsAvailable(bool* available) const {
   DCHECK(initialized_) << "Init() must be called first.";
-  *available = (output_channels_ == 2);
+  *available = (output_channels() == 2);
   return 0;
 }
 
@@ -870,7 +868,7 @@ int32_t WebRtcAudioDeviceImpl::StereoPlayout(bool* enabled) const {
 int32_t WebRtcAudioDeviceImpl::StereoRecordingIsAvailable(
     bool* available) const {
   DCHECK(initialized_) << "Init() must be called first.";
-  *available = (input_channels_ == 2);
+  *available = (input_channels() == 2);
   return 0;
 }
 
@@ -961,7 +959,7 @@ int32_t WebRtcAudioDeviceImpl::SetRecordingSampleRate(
 int32_t WebRtcAudioDeviceImpl::RecordingSampleRate(
     uint32_t* samples_per_sec) const {
   // Returns the sample rate set at construction.
-  *samples_per_sec = static_cast<uint32_t>(input_sample_rate_);
+  *samples_per_sec = static_cast<uint32_t>(input_sample_rate());
   return 0;
 }
 
@@ -975,7 +973,7 @@ int32_t WebRtcAudioDeviceImpl::SetPlayoutSampleRate(
 int32_t WebRtcAudioDeviceImpl::PlayoutSampleRate(
     uint32_t* samples_per_sec) const {
   // Returns the sample rate set at construction.
-  *samples_per_sec = static_cast<uint32_t>(output_sample_rate_);
+  *samples_per_sec = static_cast<uint32_t>(output_sample_rate());
   return 0;
 }
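Note: the accessors this patch starts calling (input_buffer_size(), output_buffer_size(),
input_channels(), output_channels(), input_sample_rate(), output_sample_rate()) are declared
in webrtc_audio_device_impl.h, which is outside this diff; they read from the two new
media::AudioParameters members instead of the six deleted scalar fields. The stand-alone
sketch below illustrates that pattern only. The AudioParameters stand-in, its field names,
and the Reset() signature here are simplified assumptions for illustration, not the real
media::AudioParameters API.

    // Toy model of the refactoring pattern (not Chromium code): six scalar
    // members collapse into two parameter objects, and the old names survive
    // as thin accessors, so call sites stay readable.
    #include <cstddef>
    #include <iostream>

    struct AudioParameters {          // simplified stand-in, not the real class
      int sample_rate = 0;
      int channels = 0;
      std::size_t samples_per_packet = 0;  // buffer size in frames
      void Reset(int rate, int num_channels, std::size_t packet_size) {
        sample_rate = rate;
        channels = num_channels;
        samples_per_packet = packet_size;
      }
    };

    class AudioDeviceModel {
     public:
      void Init(int in_rate, int in_ch, std::size_t in_buf,
                int out_rate, int out_ch, std::size_t out_buf) {
        input_params_.Reset(in_rate, in_ch, in_buf);
        output_params_.Reset(out_rate, out_ch, out_buf);
      }
      // Accessors replacing the removed input_*_/output_*_ scalar members.
      int input_sample_rate() const { return input_params_.sample_rate; }
      int output_sample_rate() const { return output_params_.sample_rate; }
      int input_channels() const { return input_params_.channels; }
      int output_channels() const { return output_params_.channels; }
      std::size_t input_buffer_size() const {
        return input_params_.samples_per_packet;
      }
      std::size_t output_buffer_size() const {
        return output_params_.samples_per_packet;
      }

     private:
      AudioParameters input_params_;
      AudioParameters output_params_;
    };

    int main() {
      AudioDeviceModel adm;
      // Values mirror the patch: 20 ms input at 48 kHz, 440-frame output at 44.1 kHz.
      adm.Init(48000, 2, 2 * 480, 44100, 2, 440);
      std::cout << adm.input_sample_rate() << "/" << adm.output_buffer_size() << "\n";
      return 0;
    }

One consequence of this layout, visible in the patch itself, is that the parameters become
a single unit: Init() can hand input_audio_parameters_ and output_audio_parameters_ directly
to the AudioInputDevice and AudioDevice constructors instead of passing buffer size, channel
count, and sample rate as separate arguments.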