Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/webrtc_audio_device_impl.h" | 5 #include "content/renderer/media/webrtc_audio_device_impl.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "base/string_util.h" | 8 #include "base/string_util.h" |
| 9 #include "base/win/windows_version.h" | 9 #include "base/win/windows_version.h" |
| 10 #include "content/renderer/media/audio_hardware.h" | 10 #include "content/renderer/media/audio_hardware.h" |
| (...skipping 14 matching lines...) | |
| 25 // media::GetAudioInput[Output]HardwareSampleRate() is hardcoded to return | 25 // media::GetAudioInput[Output]HardwareSampleRate() is hardcoded to return |
| 26 // 48000 in both directions on Linux. | 26 // 48000 in both directions on Linux. |
| 27 static int kValidInputRates[] = {48000}; | 27 static int kValidInputRates[] = {48000}; |
| 28 static int kValidOutputRates[] = {48000}; | 28 static int kValidOutputRates[] = {48000}; |
| 29 #endif | 29 #endif |
| 30 | 30 |
| 31 WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl() | 31 WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl() |
| 32 : ref_count_(0), | 32 : ref_count_(0), |
| 33 render_loop_(base::MessageLoopProxy::current()), | 33 render_loop_(base::MessageLoopProxy::current()), |
| 34 audio_transport_callback_(NULL), | 34 audio_transport_callback_(NULL), |
| 35 input_buffer_size_(0), | |
| 36 output_buffer_size_(0), | |
| 37 input_channels_(0), | |
| 38 output_channels_(0), | |
| 39 input_sample_rate_(0), | |
| 40 output_sample_rate_(0), | |
| 41 input_delay_ms_(0), | 35 input_delay_ms_(0), |
| 42 output_delay_ms_(0), | 36 output_delay_ms_(0), |
| 43 last_error_(AudioDeviceModule::kAdmErrNone), | 37 last_error_(AudioDeviceModule::kAdmErrNone), |
| 44 last_process_time_(base::TimeTicks::Now()), | 38 last_process_time_(base::TimeTicks::Now()), |
| 45 session_id_(0), | 39 session_id_(0), |
| 46 bytes_per_sample_(0), | 40 bytes_per_sample_(0), |
| 47 initialized_(false), | 41 initialized_(false), |
| 48 playing_(false), | 42 playing_(false), |
| 49 recording_(false) { | 43 recording_(false) { |
| 50 DVLOG(1) << "WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()"; | 44 DVLOG(1) << "WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()"; |
| (...skipping 20 matching lines...) | |
| 71 if (ret == 0) { | 65 if (ret == 0) { |
| 72 delete this; | 66 delete this; |
| 73 } | 67 } |
| 74 return ret; | 68 return ret; |
| 75 } | 69 } |
| 76 | 70 |
| 77 size_t WebRtcAudioDeviceImpl::Render( | 71 size_t WebRtcAudioDeviceImpl::Render( |
| 78 const std::vector<float*>& audio_data, | 72 const std::vector<float*>& audio_data, |
| 79 size_t number_of_frames, | 73 size_t number_of_frames, |
| 80 size_t audio_delay_milliseconds) { | 74 size_t audio_delay_milliseconds) { |
| 81 DCHECK_LE(number_of_frames, output_buffer_size_); | 75 DCHECK_LE(number_of_frames, output_buffer_size()); |
| 82 | 76 |
| 83 { | 77 { |
| 84 base::AutoLock auto_lock(lock_); | 78 base::AutoLock auto_lock(lock_); |
| 85 // Store the reported audio delay locally. | 79 // Store the reported audio delay locally. |
| 86 output_delay_ms_ = audio_delay_milliseconds; | 80 output_delay_ms_ = audio_delay_milliseconds; |
| 87 } | 81 } |
| 88 | 82 |
| 89 const int channels = audio_data.size(); | 83 const int channels = audio_data.size(); |
| 90 DCHECK_LE(channels, output_channels_); | 84 DCHECK_LE(channels, output_channels()); |
| 91 | 85 |
| 92 int samples_per_sec = static_cast<int>(output_sample_rate_); | 86 int samples_per_sec = static_cast<int>(output_sample_rate()); |
> tommi (sloooow) - chröme, 2012/03/10 10:11:32: remove cast
> vrk (LEFT CHROMIUM), 2012/03/16 18:30:41: Done.
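Below this, Render() folds a 44.1 kHz hardware rate to 44.0 kHz and then carves the output into 10 ms chunks before feeding the webrtc::VoiceEngine. A minimal standalone sketch of that arithmetic, using illustrative inputs (48 kHz, stereo, 16-bit samples) rather than anything taken from the CL:

```cpp
#include <cstdio>

// Illustrative only: mirrors the 10 ms chunking math used in Render()/Capture().
// The input values below are example inputs, not data from this CL.
int main() {
  int samples_per_sec = 48000;     // hardware rate reported by the browser
  const int channels = 2;          // stereo
  const int bytes_per_sample = 2;  // int16 samples

  // WebRTC expects 44.0 kHz framing even when the hardware runs at 44.1 kHz.
  if (samples_per_sec == 44100)
    samples_per_sec = 44000;

  const int samples_per_10_msec = samples_per_sec / 100;  // 480 frames
  const int bytes_per_10_msec =
      channels * samples_per_10_msec * bytes_per_sample;   // 1920 bytes

  std::printf("10 ms chunk: %d frames, %d bytes\n",
              samples_per_10_msec, bytes_per_10_msec);
  return 0;
}
```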
| 93 if (samples_per_sec == 44100) { | 87 if (samples_per_sec == 44100) { |
| 94 // Even if the hardware runs at 44.1kHz, we use 44.0 internally. | 88 // Even if the hardware runs at 44.1kHz, we use 44.0 internally. |
| 95 samples_per_sec = 44000; | 89 samples_per_sec = 44000; |
| 96 } | 90 } |
| 97 uint32_t samples_per_10_msec = (samples_per_sec / 100); | 91 uint32_t samples_per_10_msec = (samples_per_sec / 100); |
| 98 const int bytes_per_10_msec = | 92 const int bytes_per_10_msec = |
| 99 channels * samples_per_10_msec * bytes_per_sample_; | 93 channels * samples_per_10_msec * bytes_per_sample_; |
| 100 | 94 |
| 101 uint32_t num_audio_samples = 0; | 95 uint32_t num_audio_samples = 0; |
| 102 size_t accumulated_audio_samples = 0; | 96 size_t accumulated_audio_samples = 0; |
| (...skipping 32 matching lines...) | |
| 135 void WebRtcAudioDeviceImpl::OnRenderError() { | 129 void WebRtcAudioDeviceImpl::OnRenderError() { |
| 136 DCHECK_EQ(MessageLoop::current(), ChildProcess::current()->io_message_loop()); | 130 DCHECK_EQ(MessageLoop::current(), ChildProcess::current()->io_message_loop()); |
| 137 // TODO(henrika): Implement error handling. | 131 // TODO(henrika): Implement error handling. |
| 138 LOG(ERROR) << "OnRenderError()"; | 132 LOG(ERROR) << "OnRenderError()"; |
| 139 } | 133 } |
| 140 | 134 |
| 141 void WebRtcAudioDeviceImpl::Capture( | 135 void WebRtcAudioDeviceImpl::Capture( |
| 142 const std::vector<float*>& audio_data, | 136 const std::vector<float*>& audio_data, |
| 143 size_t number_of_frames, | 137 size_t number_of_frames, |
| 144 size_t audio_delay_milliseconds) { | 138 size_t audio_delay_milliseconds) { |
| 145 DCHECK_LE(number_of_frames, input_buffer_size_); | 139 DCHECK_LE(number_of_frames, input_buffer_size()); |
| 146 | 140 |
| 147 int output_delay_ms = 0; | 141 int output_delay_ms = 0; |
| 148 { | 142 { |
| 149 base::AutoLock auto_lock(lock_); | 143 base::AutoLock auto_lock(lock_); |
| 150 // Store the reported audio delay locally. | 144 // Store the reported audio delay locally. |
| 151 input_delay_ms_ = audio_delay_milliseconds; | 145 input_delay_ms_ = audio_delay_milliseconds; |
| 152 output_delay_ms = output_delay_ms_; | 146 output_delay_ms = output_delay_ms_; |
| 153 } | 147 } |
| 154 | 148 |
| 155 const int channels = audio_data.size(); | 149 const int channels = audio_data.size(); |
| 156 DCHECK_LE(channels, input_channels_); | 150 DCHECK_LE(channels, input_channels()); |
| 157 uint32_t new_mic_level = 0; | 151 uint32_t new_mic_level = 0; |
| 158 | 152 |
| 159 // Interleave, scale, and clip input to int16 and store result in | 153 // Interleave, scale, and clip input to int16 and store result in |
| 160 // a local byte buffer. | 154 // a local byte buffer. |
| 161 media::InterleaveFloatToInt16(audio_data, | 155 media::InterleaveFloatToInt16(audio_data, |
| 162 input_buffer_.get(), | 156 input_buffer_.get(), |
| 163 number_of_frames); | 157 number_of_frames); |
| 164 | 158 |
| 165 int samples_per_sec = static_cast<int>(input_sample_rate_); | 159 int samples_per_sec = static_cast<int>(input_sample_rate()); |
> tommi (sloooow) - chröme, 2012/03/10 10:11:32: remove cast
> vrk (LEFT CHROMIUM), 2012/03/16 18:30:41: Done.
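Just above, `media::InterleaveFloatToInt16()` converts the per-channel float buffers into the interleaved int16 buffer that the byte-oriented code below consumes. A rough standalone sketch of what such a conversion does (a hypothetical helper, not Chromium's implementation; the exact scaling and clipping may differ):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative sketch only: take one float buffer per channel in [-1.0, 1.0],
// scale to the int16 range, clip out-of-range samples, and write frames
// interleaved (L, R, L, R, ...).
static void InterleaveFloatToInt16Sketch(
    const std::vector<const float*>& channels,
    int16_t* out,
    size_t frames) {
  const size_t num_channels = channels.size();
  for (size_t frame = 0; frame < frames; ++frame) {
    for (size_t ch = 0; ch < num_channels; ++ch) {
      float sample = channels[ch][frame] * 32767.0f;
      if (sample > 32767.0f) sample = 32767.0f;
      if (sample < -32768.0f) sample = -32768.0f;
      out[frame * num_channels + ch] = static_cast<int16_t>(sample);
    }
  }
}
```

In the CL itself the destination is `input_buffer_.get()`, which Init() allocates with room for `input_buffer_size() * input_channels()` samples.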
| 166 if (samples_per_sec == 44100) { | 160 if (samples_per_sec == 44100) { |
| 167 // Even if the hardware runs at 44.1kHz, we use 44.0 internally. | 161 // Even if the hardware runs at 44.1kHz, we use 44.0 internally. |
| 168 samples_per_sec = 44000; | 162 samples_per_sec = 44000; |
| 169 } | 163 } |
| 170 const int samples_per_10_msec = (samples_per_sec / 100); | 164 const int samples_per_10_msec = (samples_per_sec / 100); |
| 171 const int bytes_per_10_msec = | 165 const int bytes_per_10_msec = |
| 172 channels * samples_per_10_msec * bytes_per_sample_; | 166 channels * samples_per_10_msec * bytes_per_sample_; |
| 173 size_t accumulated_audio_samples = 0; | 167 size_t accumulated_audio_samples = 0; |
| 174 | 168 |
| 175 char* audio_byte_buffer = reinterpret_cast<char*>(input_buffer_.get()); | 169 char* audio_byte_buffer = reinterpret_cast<char*>(input_buffer_.get()); |
| (...skipping 118 matching lines...) | |
| 294 DCHECK(!audio_output_device_); | 288 DCHECK(!audio_output_device_); |
| 295 DCHECK(!input_buffer_.get()); | 289 DCHECK(!input_buffer_.get()); |
| 296 DCHECK(!output_buffer_.get()); | 290 DCHECK(!output_buffer_.get()); |
| 297 | 291 |
| 298 // TODO(henrika): it could be possible to allow one of the directions (input | 292 // TODO(henrika): it could be possible to allow one of the directions (input |
| 299 // or output) to use a non-supported rate. As an example: if only the | 293 // or output) to use a non-supported rate. As an example: if only the |
| 300 // output rate is OK, we could finalize Init() and only set up an AudioDevice. | 294 // output rate is OK, we could finalize Init() and only set up an AudioDevice. |
| 301 | 295 |
| 302 // Ask the browser for the default audio output hardware sample-rate. | 296 // Ask the browser for the default audio output hardware sample-rate. |
| 303 // This request is based on a synchronous IPC message. | 297 // This request is based on a synchronous IPC message. |
| 304 int output_sample_rate = | 298 int out_sample_rate = |
| 305 static_cast<int>(audio_hardware::GetOutputSampleRate()); | 299 static_cast<int>(audio_hardware::GetOutputSampleRate()); |
> tommi (sloooow) - chröme, 2012/03/10 10:11:32: pretty please request. to make the APIs consistent
> vrk (LEFT CHROMIUM), 2012/03/16 18:30:41: Really good idea! I changed everything to int.
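The rows that follow reject any reported rate that is not in the platform whitelist (`kValidOutputRates`, and later `kValidInputRates`) using `std::find()` over the raw array. A hypothetical helper expressing the same check; the rates listed here are examples, not the per-platform tables defined earlier in this file:

```cpp
#include <algorithm>

// Hypothetical helper illustrating the std::find() validation idiom used in
// Init(). The whitelist below is an example, not kValidInputRates/
// kValidOutputRates from this file.
static const int kValidRates[] = {44100, 48000, 96000};

static bool IsSupportedRate(int sample_rate) {
  const int* begin = &kValidRates[0];
  const int* end = begin + sizeof(kValidRates) / sizeof(kValidRates[0]);
  return std::find(begin, end, sample_rate) != end;
}
```

With a helper like this, each validation block in Init() would reduce to a single `if (!IsSupportedRate(rate)) return -1;`.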
| 306 DVLOG(1) << "Audio output hardware sample rate: " << output_sample_rate; | 300 DVLOG(1) << "Audio output hardware sample rate: " << out_sample_rate; |
| 307 | 301 |
| 308 // Verify that the reported output hardware sample rate is supported | 302 // Verify that the reported output hardware sample rate is supported |
| 309 // on the current platform. | 303 // on the current platform. |
| 310 if (std::find(&kValidOutputRates[0], | 304 if (std::find(&kValidOutputRates[0], |
| 311 &kValidOutputRates[0] + arraysize(kValidOutputRates), | 305 &kValidOutputRates[0] + arraysize(kValidOutputRates), |
| 312 output_sample_rate) == | 306 out_sample_rate) == |
| 313 &kValidOutputRates[arraysize(kValidOutputRates)]) { | 307 &kValidOutputRates[arraysize(kValidOutputRates)]) { |
| 314 DLOG(ERROR) << output_sample_rate << " is not a supported output rate."; | 308 DLOG(ERROR) << out_sample_rate << " is not a supported output rate."; |
| 315 return -1; | 309 return -1; |
| 316 } | 310 } |
| 317 | 311 |
| 318 // Ask the browser for the default audio input hardware sample-rate. | 312 // Ask the browser for the default audio input hardware sample-rate. |
| 319 // This request is based on a synchronous IPC message. | 313 // This request is based on a synchronous IPC message. |
| 320 int input_sample_rate = | 314 int in_sample_rate = |
| 321 static_cast<int>(audio_hardware::GetInputSampleRate()); | 315 static_cast<int>(audio_hardware::GetInputSampleRate()); |
> tommi (sloooow) - chröme, 2012/03/10 10:11:32: ditto
> vrk (LEFT CHROMIUM), 2012/03/16 18:30:41: Done.
| 322 DVLOG(1) << "Audio input hardware sample rate: " << input_sample_rate; | 316 DVLOG(1) << "Audio input hardware sample rate: " << in_sample_rate; |
| 323 | 317 |
| 324 // Verify that the reported input hardware sample rate is supported | 318 // Verify that the reported input hardware sample rate is supported |
| 325 // on the current platform. | 319 // on the current platform. |
| 326 if (std::find(&kValidInputRates[0], | 320 if (std::find(&kValidInputRates[0], |
| 327 &kValidInputRates[0] + arraysize(kValidInputRates), | 321 &kValidInputRates[0] + arraysize(kValidInputRates), |
| 328 input_sample_rate) == | 322 in_sample_rate) == |
| 329 &kValidInputRates[arraysize(kValidInputRates)]) { | 323 &kValidInputRates[arraysize(kValidInputRates)]) { |
| 330 DLOG(ERROR) << input_sample_rate << " is not a supported input rate."; | 324 DLOG(ERROR) << in_sample_rate << " is not a supported input rate."; |
| 331 return -1; | 325 return -1; |
| 332 } | 326 } |
| 333 | 327 |
| 334 // Ask the browser for the default number of audio input channels. | 328 // Ask the browser for the default number of audio input channels. |
| 335 // This request is based on a synchronous IPC message. | 329 // This request is based on a synchronous IPC message. |
| 336 int input_channels = audio_hardware::GetInputChannelCount(); | 330 ChannelLayout input_channel_layout = |
| 337 DVLOG(1) << "Audio input hardware channels: " << input_channels; | 331 audio_hardware::GetInputChannelLayout(); |
| 332 DVLOG(1) << "Audio input hardware channels: " << input_channel_layout; | |
| 338 | 333 |
| 339 int output_channels = 0; | 334 ChannelLayout out_channel_layout = CHANNEL_LAYOUT_MONO; |
| 340 | 335 AudioParameters::Format in_format = AudioParameters::AUDIO_PCM_LINEAR; |
| 341 size_t input_buffer_size = 0; | 336 size_t in_buffer_size = 0; |
| 342 size_t output_buffer_size = 0; | 337 size_t out_buffer_size = 0; |
| 343 | 338 |
| 344 // TODO(henrika): factor out all platform specific parts in separate | 339 // TODO(henrika): factor out all platform specific parts in separate |
| 345 // functions. Code is a bit messy right now. | 340 // functions. Code is a bit messy right now. |
| 346 | 341 |
| 347 // Windows | 342 // Windows |
| 348 #if defined(OS_WIN) | 343 #if defined(OS_WIN) |
| 349 // Always use stereo rendering on Windows. | 344 // Always use stereo rendering on Windows. |
| 350 output_channels = 2; | 345 out_channel_layout = CHANNEL_LAYOUT_STEREO; |
| 346 | |
| 347 DVLOG(1) << "Using AUDIO_PCM_LOW_LATENCY as input mode on Windows."; | |
| 348 in_format = AudioParameters::AUDIO_PCM_LOW_LATENCY; | |
| 351 | 349 |
| 352 // Capture side: AUDIO_PCM_LOW_LATENCY is based on the Core Audio (WASAPI) | 350 // Capture side: AUDIO_PCM_LOW_LATENCY is based on the Core Audio (WASAPI) |
| 353 // API which was introduced in Windows Vista. For lower Windows versions, | 351 // API which was introduced in Windows Vista. For lower Windows versions, |
| 354 // a callback-driven Wave implementation is used instead. An input buffer | 352 // a callback-driven Wave implementation is used instead. An input buffer |
| 355 // size of 10ms works well for both these implementations. | 353 // size of 10ms works well for both these implementations. |
| 356 | 354 |
| 357 // Use different buffer sizes depending on the current hardware sample rate. | 355 // Use different buffer sizes depending on the current hardware sample rate. |
| 358 if (input_sample_rate == 44100) { | 356 if (in_sample_rate == 44100) { |
| 359 // We do run at 44.1kHz at the actual audio layer, but ask for frames | 357 // We do run at 44.1kHz at the actual audio layer, but ask for frames |
| 360 // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. | 358 // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. |
| 361 input_buffer_size = 440; | 359 in_buffer_size = 440; |
| 362 } else { | 360 } else { |
| 363 input_buffer_size = (input_sample_rate / 100); | 361 in_buffer_size = (in_sample_rate / 100); |
| 364 } | 362 } |
| 365 | 363 |
| 366 // Render side: AUDIO_PCM_LOW_LATENCY is based on the Core Audio (WASAPI) | 364 // Render side: AUDIO_PCM_LOW_LATENCY is based on the Core Audio (WASAPI) |
| 367 // API which was introduced in Windows Vista. For lower Windows versions, | 365 // API which was introduced in Windows Vista. For lower Windows versions, |
| 368 // a callback-driven Wave implementation is used instead. An output buffer | 366 // a callback-driven Wave implementation is used instead. An output buffer |
| 369 // size of 10ms works well for WASAPI but 30ms is needed for Wave. | 367 // size of 10ms works well for WASAPI but 30ms is needed for Wave. |
| 370 | 368 |
| 371 // Use different buffer sizes depending on the current hardware sample rate. | 369 // Use different buffer sizes depending on the current hardware sample rate. |
| 372 if (output_sample_rate == 96000 || output_sample_rate == 48000) { | 370 if (out_sample_rate == 96000 || out_sample_rate == 48000) { |
| 373 output_buffer_size = (output_sample_rate / 100); | 371 out_buffer_size = (out_sample_rate / 100); |
| 374 } else { | 372 } else { |
| 375 // We do run at 44.1kHz at the actual audio layer, but ask for frames | 373 // We do run at 44.1kHz at the actual audio layer, but ask for frames |
| 376 // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. | 374 // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. |
| 377 // TODO(henrika): figure out why we seem to need 20ms here for glitch- | 375 // TODO(henrika): figure out why we seem to need 20ms here for glitch- |
| 378 // free audio. | 376 // free audio. |
| 379 output_buffer_size = 2 * 440; | 377 out_buffer_size = 2 * 440; |
| 380 } | 378 } |
| 381 | 379 |
| 382 // Windows XP and lower can't cope with 10 ms output buffer size. | 380 // Windows XP and lower can't cope with 10 ms output buffer size. |
| 383 // It must be extended to 30 ms (60 ms will be used internally by WaveOut). | 381 // It must be extended to 30 ms (60 ms will be used internally by WaveOut). |
| 384 if (!media::IsWASAPISupported()) { | 382 if (!media::IsWASAPISupported()) { |
| 385 output_buffer_size = 3 * output_buffer_size; | 383 out_buffer_size = 3 * out_buffer_size; |
| 386 DLOG(WARNING) << "Extending the output buffer size by a factor of three " | 384 DLOG(WARNING) << "Extending the output buffer size by a factor of three " |
| 387 << "since Windows XP has been detected."; | 385 << "since Windows XP has been detected."; |
| 388 } | 386 } |
| 389 | 387 |
| 390 // Mac OS X | 388 // Mac OS X |
| 391 #elif defined(OS_MACOSX) | 389 #elif defined(OS_MACOSX) |
| 392 output_channels = 1; | 390 out_channel_layout = CHANNEL_LAYOUT_MONO; |
| 391 | |
| 392 DVLOG(1) << "Using AUDIO_PCM_LOW_LATENCY as input mode on Mac OS X."; | |
| 393 in_format = AudioParameters::AUDIO_PCM_LOW_LATENCY; | |
| 393 | 394 |
| 394 // Capture side: AUDIO_PCM_LOW_LATENCY on Mac OS X is based on a callback- | 395 // Capture side: AUDIO_PCM_LOW_LATENCY on Mac OS X is based on a callback- |
| 395 // driven Core Audio implementation. Tests have shown that 10ms is a suitable | 396 // driven Core Audio implementation. Tests have shown that 10ms is a suitable |
| 396 // frame size to use, both for 48kHz and 44.1kHz. | 397 // frame size to use, both for 48kHz and 44.1kHz. |
| 397 | 398 |
| 398 // Use different buffer sizes depending on the current hardware sample rate. | 399 // Use different buffer sizes depending on the current hardware sample rate. |
| 399 if (input_sample_rate == 44100) { | 400 if (in_sample_rate == 44100) { |
| 400 // We do run at 44.1kHz at the actual audio layer, but ask for frames | 401 // We do run at 44.1kHz at the actual audio layer, but ask for frames |
| 401 // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. | 402 // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. |
| 402 input_buffer_size = 440; | 403 in_buffer_size = 440; |
| 403 } else { | 404 } else { |
| 404 input_buffer_size = (input_sample_rate / 100); | 405 in_buffer_size = (in_sample_rate / 100); |
| 405 } | 406 } |
| 406 | 407 |
| 407 // Render side: AUDIO_PCM_LOW_LATENCY on Mac OS X is based on a callback- | 408 // Render side: AUDIO_PCM_LOW_LATENCY on Mac OS X is based on a callback- |
| 408 // driven Core Audio implementation. Tests have shown that 10ms is a suitable | 409 // driven Core Audio implementation. Tests have shown that 10ms is a suitable |
| 409 // frame size to use, both for 48kHz and 44.1kHz. | 410 // frame size to use, both for 48kHz and 44.1kHz. |
| 410 | 411 |
| 411 // Use different buffer sizes depending on the current hardware sample rate. | 412 // Use different buffer sizes depending on the current hardware sample rate. |
| 412 if (output_sample_rate == 48000) { | 413 if (out_sample_rate == 48000) { |
| 413 output_buffer_size = 480; | 414 out_buffer_size = 480; |
| 414 } else { | 415 } else { |
| 415 // We do run at 44.1kHz at the actual audio layer, but ask for frames | 416 // We do run at 44.1kHz at the actual audio layer, but ask for frames |
| 416 // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. | 417 // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. |
| 417 output_buffer_size = 440; | 418 out_buffer_size = 440; |
| 418 } | 419 } |
| 419 // Linux | 420 // Linux |
| 420 #elif defined(OS_LINUX) || defined(OS_OPENBSD) | 421 #elif defined(OS_LINUX) || defined(OS_OPENBSD) |
| 421 input_channels = 2; | 422 input_channel_layout = CHANNEL_LAYOUT_STEREO; |
| 422 output_channels = 1; | 423 out_channel_layout = CHANNEL_LAYOUT_MONO; |
| 423 | 424 |
| 424 // Based on tests using the current ALSA implementation in Chrome, we have | 425 // Based on tests using the current ALSA implementation in Chrome, we have |
| 425 // found that the best combination is 20ms on the input side and 10ms on the | 426 // found that the best combination is 20ms on the input side and 10ms on the |
| 426 // output side. | 427 // output side. |
| 427 // TODO(henrika): It might be possible to reduce the input buffer | 428 // TODO(henrika): It might be possible to reduce the input buffer |
| 428 // size and reduce the delay even more. | 429 // size and reduce the delay even more. |
| 429 input_buffer_size = 2 * 480; | 430 in_buffer_size = 2 * 480; |
| 430 output_buffer_size = 480; | 431 out_buffer_size = 480; |
| 431 #else | 432 #else |
| 432 DLOG(ERROR) << "Unsupported platform"; | 433 DLOG(ERROR) << "Unsupported platform"; |
| 433 return -1; | 434 return -1; |
| 434 #endif | 435 #endif |
| 435 | 436 |
| 436 // Store utilized parameters to ensure that we can check them | 437 // Store utilized parameters to ensure that we can check them |
| 437 // after a successful initialization. | 438 // after a successful initialization. |
| 438 output_buffer_size_ = output_buffer_size; | 439 output_audio_parameters_.Reset( |
| 439 output_channels_ = output_channels; | 440 AudioParameters::AUDIO_PCM_LOW_LATENCY, out_channel_layout, |
> vrk (LEFT CHROMIUM), 2012/03/09 20:59:32: Prior to this CL, these values would have been set
> tommi (sloooow) - chröme, 2012/03/10 10:11:32: agreed.
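The NEW column replaces the individual `output_buffer_size_`, `output_channels_`, and `output_sample_rate_` members with a single `output_audio_parameters_` object (and likewise for input), read back through accessors such as `output_buffer_size()`. A simplified standalone illustration of that storage-plus-accessor pattern; this is not `media::AudioParameters`, and every name below is invented for the example:

```cpp
#include <cstddef>

// Simplified illustration of keeping the negotiated audio settings in one
// parameters object and exposing thin accessors, instead of separate
// *_buffer_size_/*_channels_/*_sample_rate_ members.
struct ExampleAudioParameters {
  int channels;
  int sample_rate;
  size_t buffer_size;  // frames per hardware buffer
};

class ExampleDevice {
 public:
  void set_output_parameters(const ExampleAudioParameters& params) {
    output_params_ = params;
  }
  int output_channels() const { return output_params_.channels; }
  int output_sample_rate() const { return output_params_.sample_rate; }
  size_t output_buffer_size() const { return output_params_.buffer_size; }

 private:
  ExampleAudioParameters output_params_;
};
```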
| 440 output_sample_rate_ = static_cast<double>(output_sample_rate); | 441 out_sample_rate, 16, out_buffer_size); |
| 441 | 442 |
| 442 input_buffer_size_ = input_buffer_size; | 443 input_audio_parameters_.Reset( |
| 443 input_channels_ = input_channels; | 444 in_format, input_channel_layout, in_sample_rate, |
| 444 input_sample_rate_ = input_sample_rate; | 445 16, in_buffer_size); |
| 445 | 446 |
| 446 // Create and configure the audio capturing client. | 447 // Create and configure the audio capturing client. |
| 447 audio_input_device_ = new AudioInputDevice( | 448 audio_input_device_ = new AudioInputDevice( |
| 448 input_buffer_size, input_channels, input_sample_rate, this, this); | 449 input_audio_parameters_, this, this); |
| 449 | 450 |
| 450 // Create and configure the audio rendering client. | 451 // Create and configure the audio rendering client. |
| 451 audio_output_device_ = new AudioDevice( | 452 audio_output_device_ = new AudioDevice(output_audio_parameters_, this); |
| 452 output_buffer_size, output_channels, output_sample_rate, this); | |
| 453 | 453 |
| 454 DCHECK(audio_input_device_); | 454 DCHECK(audio_input_device_); |
| 455 DCHECK(audio_output_device_); | 455 DCHECK(audio_output_device_); |
| 456 | 456 |
| 457 // Allocate local audio buffers based on the parameters above. | 457 // Allocate local audio buffers based on the parameters above. |
| 458 // It is assumed that each audio sample contains 16 bits and each | 458 // It is assumed that each audio sample contains 16 bits and each |
| 459 // audio frame contains one or two audio samples depending on the | 459 // audio frame contains one or two audio samples depending on the |
| 460 // number of channels. | 460 // number of channels. |
| 461 input_buffer_.reset(new int16[input_buffer_size * input_channels]); | 461 input_buffer_.reset(new int16[input_buffer_size() * input_channels()]); |
| 462 output_buffer_.reset(new int16[output_buffer_size * output_channels]); | 462 output_buffer_.reset(new int16[output_buffer_size() * output_channels()]); |
| 463 | 463 |
| 464 DCHECK(input_buffer_.get()); | 464 DCHECK(input_buffer_.get()); |
| 465 DCHECK(output_buffer_.get()); | 465 DCHECK(output_buffer_.get()); |
| 466 | 466 |
| 467 bytes_per_sample_ = sizeof(*input_buffer_.get()); | 467 bytes_per_sample_ = sizeof(*input_buffer_.get()); |
| 468 | 468 |
| 469 initialized_ = true; | 469 initialized_ = true; |
| 470 | 470 |
| 471 DVLOG(1) << "Capture parameters (size/channels/rate): (" | 471 DVLOG(1) << "Capture parameters (size/channels/rate): (" |
| 472 << input_buffer_size_ << "/" << input_channels_ << "/" | 472 << input_buffer_size() << "/" << input_channels() << "/" |
| 473 << input_sample_rate_ << ")"; | 473 << input_sample_rate() << ")"; |
| 474 DVLOG(1) << "Render parameters (size/channels/rate): (" | 474 DVLOG(1) << "Render parameters (size/channels/rate): (" |
| 475 << output_buffer_size_ << "/" << output_channels_ << "/" | 475 << output_buffer_size() << "/" << output_channels() << "/" |
| 476 << output_sample_rate_ << ")"; | 476 << output_sample_rate() << ")"; |
| 477 return 0; | 477 return 0; |
| 478 } | 478 } |
| 479 | 479 |
| 480 void WebRtcAudioDeviceImpl::InitOnRenderThread(int32_t* error, | 480 void WebRtcAudioDeviceImpl::InitOnRenderThread(int32_t* error, |
| 481 base::WaitableEvent* event) { | 481 base::WaitableEvent* event) { |
| 482 DCHECK(render_loop_->BelongsToCurrentThread()); | 482 DCHECK(render_loop_->BelongsToCurrentThread()); |
| 483 *error = Init(); | 483 *error = Init(); |
| 484 event->Signal(); | 484 event->Signal(); |
| 485 } | 485 } |
| 486 | 486 |
| (...skipping 357 matching lines...) | |
| 844 return -1; | 844 return -1; |
| 845 } | 845 } |
| 846 | 846 |
| 847 int32_t WebRtcAudioDeviceImpl::MicrophoneBoost(bool* enabled) const { | 847 int32_t WebRtcAudioDeviceImpl::MicrophoneBoost(bool* enabled) const { |
| 848 NOTIMPLEMENTED(); | 848 NOTIMPLEMENTED(); |
| 849 return -1; | 849 return -1; |
| 850 } | 850 } |
| 851 | 851 |
| 852 int32_t WebRtcAudioDeviceImpl::StereoPlayoutIsAvailable(bool* available) const { | 852 int32_t WebRtcAudioDeviceImpl::StereoPlayoutIsAvailable(bool* available) const { |
| 853 DCHECK(initialized_) << "Init() must be called first."; | 853 DCHECK(initialized_) << "Init() must be called first."; |
| 854 *available = (output_channels_ == 2); | 854 *available = (output_channels() == 2); |
| 855 return 0; | 855 return 0; |
| 856 } | 856 } |
| 857 | 857 |
| 858 int32_t WebRtcAudioDeviceImpl::SetStereoPlayout(bool enable) { | 858 int32_t WebRtcAudioDeviceImpl::SetStereoPlayout(bool enable) { |
| 859 DVLOG(2) << "WARNING: WebRtcAudioDeviceImpl::SetStereoPlayout() " | 859 DVLOG(2) << "WARNING: WebRtcAudioDeviceImpl::SetStereoPlayout() " |
| 860 << "NOT IMPLEMENTED"; | 860 << "NOT IMPLEMENTED"; |
| 861 return 0; | 861 return 0; |
| 862 } | 862 } |
| 863 | 863 |
| 864 int32_t WebRtcAudioDeviceImpl::StereoPlayout(bool* enabled) const { | 864 int32_t WebRtcAudioDeviceImpl::StereoPlayout(bool* enabled) const { |
| 865 DVLOG(2) << "WARNING: WebRtcAudioDeviceImpl::StereoPlayout() " | 865 DVLOG(2) << "WARNING: WebRtcAudioDeviceImpl::StereoPlayout() " |
| 866 << "NOT IMPLEMENTED"; | 866 << "NOT IMPLEMENTED"; |
| 867 return 0; | 867 return 0; |
| 868 } | 868 } |
| 869 | 869 |
| 870 int32_t WebRtcAudioDeviceImpl::StereoRecordingIsAvailable( | 870 int32_t WebRtcAudioDeviceImpl::StereoRecordingIsAvailable( |
| 871 bool* available) const { | 871 bool* available) const { |
| 872 DCHECK(initialized_) << "Init() must be called first."; | 872 DCHECK(initialized_) << "Init() must be called first."; |
| 873 *available = (input_channels_ == 2); | 873 *available = (input_channels() == 2); |
| 874 return 0; | 874 return 0; |
| 875 } | 875 } |
| 876 | 876 |
| 877 int32_t WebRtcAudioDeviceImpl::SetStereoRecording(bool enable) { | 877 int32_t WebRtcAudioDeviceImpl::SetStereoRecording(bool enable) { |
| 878 DVLOG(2) << "WARNING: WebRtcAudioDeviceImpl::SetStereoRecording() " | 878 DVLOG(2) << "WARNING: WebRtcAudioDeviceImpl::SetStereoRecording() " |
| 879 << "NOT IMPLEMENTED"; | 879 << "NOT IMPLEMENTED"; |
| 880 return -1; | 880 return -1; |
| 881 } | 881 } |
| 882 | 882 |
| 883 int32_t WebRtcAudioDeviceImpl::StereoRecording(bool* enabled) const { | 883 int32_t WebRtcAudioDeviceImpl::StereoRecording(bool* enabled) const { |
| (...skipping 70 matching lines...) | |
| 954 int32_t WebRtcAudioDeviceImpl::SetRecordingSampleRate( | 954 int32_t WebRtcAudioDeviceImpl::SetRecordingSampleRate( |
| 955 const uint32_t samples_per_sec) { | 955 const uint32_t samples_per_sec) { |
| 956 // Sample rate should only be set at construction. | 956 // Sample rate should only be set at construction. |
| 957 NOTIMPLEMENTED(); | 957 NOTIMPLEMENTED(); |
| 958 return -1; | 958 return -1; |
| 959 } | 959 } |
| 960 | 960 |
| 961 int32_t WebRtcAudioDeviceImpl::RecordingSampleRate( | 961 int32_t WebRtcAudioDeviceImpl::RecordingSampleRate( |
| 962 uint32_t* samples_per_sec) const { | 962 uint32_t* samples_per_sec) const { |
| 963 // Returns the sample rate set at construction. | 963 // Returns the sample rate set at construction. |
| 964 *samples_per_sec = static_cast<uint32_t>(input_sample_rate_); | 964 *samples_per_sec = static_cast<uint32_t>(input_sample_rate()); |
| 965 return 0; | 965 return 0; |
| 966 } | 966 } |
| 967 | 967 |
| 968 int32_t WebRtcAudioDeviceImpl::SetPlayoutSampleRate( | 968 int32_t WebRtcAudioDeviceImpl::SetPlayoutSampleRate( |
| 969 const uint32_t samples_per_sec) { | 969 const uint32_t samples_per_sec) { |
| 970 // Sample rate should only be set at construction. | 970 // Sample rate should only be set at construction. |
| 971 NOTIMPLEMENTED(); | 971 NOTIMPLEMENTED(); |
| 972 return -1; | 972 return -1; |
| 973 } | 973 } |
| 974 | 974 |
| 975 int32_t WebRtcAudioDeviceImpl::PlayoutSampleRate( | 975 int32_t WebRtcAudioDeviceImpl::PlayoutSampleRate( |
| 976 uint32_t* samples_per_sec) const { | 976 uint32_t* samples_per_sec) const { |
| 977 // Returns the sample rate set at construction. | 977 // Returns the sample rate set at construction. |
| 978 *samples_per_sec = static_cast<uint32_t>(output_sample_rate_); | 978 *samples_per_sec = static_cast<uint32_t>(output_sample_rate()); |
| 979 return 0; | 979 return 0; |
| 980 } | 980 } |
| 981 | 981 |
| 982 int32_t WebRtcAudioDeviceImpl::ResetAudioDevice() { | 982 int32_t WebRtcAudioDeviceImpl::ResetAudioDevice() { |
| 983 NOTIMPLEMENTED(); | 983 NOTIMPLEMENTED(); |
| 984 return -1; | 984 return -1; |
| 985 } | 985 } |
| 986 | 986 |
| 987 int32_t WebRtcAudioDeviceImpl::SetLoudspeakerStatus(bool enable) { | 987 int32_t WebRtcAudioDeviceImpl::SetLoudspeakerStatus(bool enable) { |
| 988 NOTIMPLEMENTED(); | 988 NOTIMPLEMENTED(); |
| 989 return -1; | 989 return -1; |
| 990 } | 990 } |
| 991 | 991 |
| 992 int32_t WebRtcAudioDeviceImpl::GetLoudspeakerStatus(bool* enabled) const { | 992 int32_t WebRtcAudioDeviceImpl::GetLoudspeakerStatus(bool* enabled) const { |
| 993 NOTIMPLEMENTED(); | 993 NOTIMPLEMENTED(); |
| 994 return -1; | 994 return -1; |
| 995 } | 995 } |
| 996 | 996 |
| 997 void WebRtcAudioDeviceImpl::SetSessionId(int session_id) { | 997 void WebRtcAudioDeviceImpl::SetSessionId(int session_id) { |
| 998 session_id_ = session_id; | 998 session_id_ = session_id; |
| 999 } | 999 } |