Index: content/renderer/media/webaudio_capturer_source.cc
diff --git a/content/renderer/media/webaudio_capturer_source.cc b/content/renderer/media/webaudio_capturer_source.cc
index 35cd99ce3b03d0b13d38672bf6b1c70b928fa223..263ba4745007a424c20f23af8ea8d05106dabaca 100644
--- a/content/renderer/media/webaudio_capturer_source.cc
+++ b/content/renderer/media/webaudio_capturer_source.cc
@@ -5,7 +5,8 @@
 #include "content/renderer/media/webaudio_capturer_source.h"
 
 #include "base/logging.h"
-#include "content/renderer/media/webrtc_audio_capturer.h"
+#include "content/renderer/media/webrtc_local_audio_source_provider.h"
+#include "content/renderer/media/webrtc_local_audio_track.h"
 
 using media::AudioBus;
 using media::AudioFifo;
@@ -14,15 +15,13 @@ using media::ChannelLayout;
 using media::CHANNEL_LAYOUT_MONO;
 using media::CHANNEL_LAYOUT_STEREO;
 
-static const int kFifoSize = 2048;
+static const int kMaxNumberOfBuffersInFifo = 5;
 
 namespace content {
 
-WebAudioCapturerSource::WebAudioCapturerSource(WebRtcAudioCapturer* capturer)
-    : capturer_(capturer),
-      set_format_channels_(0),
-      callback_(0),
-      started_(false) {
+WebAudioCapturerSource::WebAudioCapturerSource()
+    : track_(NULL),
+      source_provider_(NULL) {
 }
 
 WebAudioCapturerSource::~WebAudioCapturerSource() {
@@ -30,57 +29,72 @@ WebAudioCapturerSource::~WebAudioCapturerSource() {
 
 void WebAudioCapturerSource::setFormat(
     size_t number_of_channels, float sample_rate) {
+  DCHECK(thread_checker_.CalledOnValidThread());
   DVLOG(1) << "WebAudioCapturerSource::setFormat(sample_rate="
            << sample_rate << ")";
-  if (number_of_channels <= 2) {
-    set_format_channels_ = number_of_channels;
-    ChannelLayout channel_layout =
-        number_of_channels == 1 ? CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
-    capturer_->SetCapturerSource(this, channel_layout, sample_rate);
-  } else {
-    // TODO(crogers): Handle more than just the mono and stereo cases.
+  if (number_of_channels > 2) {
+    // TODO(xians): Handle more than just the mono and stereo cases.
     LOG(WARNING) << "WebAudioCapturerSource::setFormat() : unhandled format.";
+    return;
   }
+
+  ChannelLayout channel_layout =
+      number_of_channels == 1 ? CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+
+  base::AutoLock auto_lock(lock_);
+  // Set the format used by this WebAudioCapturerSource. We use a 10 ms buffer
+  // size since that is the native packet size that the WebRTC pipeline
+  // runs on.
+  params_.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+                channel_layout, number_of_channels, 0, sample_rate, 16,
+                sample_rate / 100);
+
+  // Update the downstream client to use the same format as what WebKit
+  // is using.
+  if (track_)
+    track_->SetCaptureFormat(params_);
+
+  wrapper_bus_ = AudioBus::CreateWrapper(params_.channels());
+  capture_bus_ = AudioBus::Create(params_);
+  fifo_.reset(new AudioFifo(
+      params_.channels(),
+      kMaxNumberOfBuffersInFifo * params_.frames_per_buffer()));
 }
 
-void WebAudioCapturerSource::Initialize(
-    const media::AudioParameters& params,
-    media::AudioCapturerSource::CaptureCallback* callback,
-    int session_id) {
+void WebAudioCapturerSource::Start(
+    WebRtcLocalAudioTrack* track,
+    WebRtcLocalAudioSourceProvider* source_provider) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(track);
+  DCHECK(source_provider);
   // The downstream client should be configured the same as what WebKit
   // is feeding it.
-  DCHECK_EQ(set_format_channels_, params.channels());
+  track->SetCaptureFormat(params_);
 
   base::AutoLock auto_lock(lock_);
-  params_ = params;
-  callback_ = callback;
-  wrapper_bus_ = AudioBus::CreateWrapper(params.channels());
-  capture_bus_ = AudioBus::Create(params);
-  fifo_.reset(new AudioFifo(params.channels(), kFifoSize));
-}
-
-void WebAudioCapturerSource::Start() {
-  started_ = true;
+  track_ = track;
+  source_provider_ = source_provider;
 }
 
 void WebAudioCapturerSource::Stop() {
-  started_ = false;
+  DCHECK(thread_checker_.CalledOnValidThread());
+  base::AutoLock auto_lock(lock_);
+  track_ = NULL;
+  source_provider_ = NULL;
 }
 
 void WebAudioCapturerSource::consumeAudio(
     const WebKit::WebVector<const float*>& audio_data,
     size_t number_of_frames) {
   base::AutoLock auto_lock(lock_);
-
-  if (!callback_)
+  if (!track_)
     return;
 
   wrapper_bus_->set_frames(number_of_frames);
 
   // Make sure WebKit is honoring what it told us up front
   // about the channels.
-  DCHECK_EQ(set_format_channels_, static_cast<int>(audio_data.size()));
-  DCHECK_EQ(set_format_channels_, wrapper_bus_->channels());
+  DCHECK_EQ(params_.channels(), static_cast<int>(audio_data.size()));
 
   for (size_t i = 0; i < audio_data.size(); ++i)
     wrapper_bus_->SetChannelData(i, const_cast<float*>(audio_data[i]));
@@ -88,15 +102,20 @@ void WebAudioCapturerSource::consumeAudio(
   // Handle mismatch between WebAudio buffer-size and WebRTC.
   int available = fifo_->max_frames() - fifo_->frames();
   if (available < static_cast<int>(number_of_frames)) {
-    LOG(ERROR) << "WebAudioCapturerSource::Consume() : FIFO overrun.";
+    NOTREACHED() << "WebAudioCapturerSource::Consume() : FIFO overrun.";
     return;
   }
 
   fifo_->Push(wrapper_bus_.get());
   int capture_frames = params_.frames_per_buffer();
+  int delay_ms = 0;
+  int volume = 0;
+  bool key_pressed = false;
   while (fifo_->frames() >= capture_frames) {
+    source_provider_->GetAudioProcessingParams(
+        &delay_ms, &volume, &key_pressed);
     fifo_->Consume(capture_bus_.get(), 0, capture_frames);
-    callback_->Capture(capture_bus_.get(), 0, 1.0, false);
+    track_->Capture(capture_bus_.get(), delay_ms, volume, key_pressed);
   }
 }
 
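
Note on the buffering logic in the patch above: the FIFO is sized to kMaxNumberOfBuffersInFifo * params_.frames_per_buffer(), i.e. five 10 ms packets, and consumeAudio() drains one 10 ms packet (sample_rate / 100 frames) per iteration of its while loop. The standalone sketch below (not part of the patch) illustrates that repacketization in plain C++, using std::deque in place of media::AudioFifo and no Chromium types; the 48 kHz sample rate and 128-frame render quantum are illustrative assumptions, not values taken from the patch.

// Standalone sketch of the FIFO repacketization done in consumeAudio():
// WebAudio delivers small render quanta (assumed 128 frames here), while the
// WebRTC side consumes fixed 10 ms packets (sample_rate / 100 frames).
#include <cstddef>
#include <deque>
#include <iostream>
#include <vector>

int main() {
  const int kSampleRate = 48000;                 // assumed sample rate
  const int kWebAudioQuantum = 128;              // frames per consumeAudio() call
  const int kCaptureFrames = kSampleRate / 100;  // 480 frames == 10 ms
  const int kMaxBuffersInFifo = 5;               // mirrors kMaxNumberOfBuffersInFifo
  const size_t kFifoCapacity =
      static_cast<size_t>(kMaxBuffersInFifo) * kCaptureFrames;

  std::deque<float> fifo;  // mono FIFO; the real code uses media::AudioFifo
  int packets_sent = 0;

  // Simulate roughly 100 ms worth of WebAudio render quanta.
  for (int i = 0; i < 38; ++i) {
    std::vector<float> quantum(kWebAudioQuantum, 0.0f);

    // Corresponds to the "FIFO overrun" branch in the patch: drop the buffer
    // if pushing it would exceed the FIFO's capacity.
    if (fifo.size() + quantum.size() > kFifoCapacity) {
      std::cerr << "FIFO overrun, dropping buffer\n";
      continue;
    }
    fifo.insert(fifo.end(), quantum.begin(), quantum.end());

    // Drain complete 10 ms packets, like the while loop around
    // track_->Capture() in consumeAudio().
    while (fifo.size() >= static_cast<size_t>(kCaptureFrames)) {
      fifo.erase(fifo.begin(), fifo.begin() + kCaptureFrames);
      ++packets_sent;
    }
  }
  std::cout << "10 ms packets delivered: " << packets_sent << "\n";
  return 0;
}

Running the sketch yields ten 10 ms packets from 38 simulated WebAudio callbacks, with 64 frames left queued; that leftover is exactly the buffer-size mismatch the FIFO exists to absorb.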