OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/webrtc_audio_capturer.h" | 5 #include "content/renderer/media/webrtc_audio_capturer.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/logging.h" | 8 #include "base/logging.h" |
9 #include "base/metrics/histogram.h" | 9 #include "base/metrics/histogram.h" |
10 #include "base/strings/string_util.h" | 10 #include "base/strings/string_util.h" |
(...skipping 16 matching lines...) |
27 // will fail if the user selects any rate outside these ranges. | 27 // will fail if the user selects any rate outside these ranges. |
28 const int kValidInputRates[] = {96000, 48000, 44100, 32000, 16000, 8000}; | 28 const int kValidInputRates[] = {96000, 48000, 44100, 32000, 16000, 8000}; |
29 #elif defined(OS_LINUX) || defined(OS_OPENBSD) | 29 #elif defined(OS_LINUX) || defined(OS_OPENBSD) |
30 const int kValidInputRates[] = {48000, 44100}; | 30 const int kValidInputRates[] = {48000, 44100}; |
31 #elif defined(OS_ANDROID) | 31 #elif defined(OS_ANDROID) |
32 const int kValidInputRates[] = {48000, 44100}; | 32 const int kValidInputRates[] = {48000, 44100}; |
33 #else | 33 #else |
34 const int kValidInputRates[] = {44100}; | 34 const int kValidInputRates[] = {44100}; |
35 #endif | 35 #endif |
36 | 36 |
37 int GetBufferSizeForSampleRate(int sample_rate) { | |
38 int buffer_size = 0; | |
39 #if defined(OS_WIN) || defined(OS_MACOSX) | |
40 // Use a buffer size of 10ms. | |
41 buffer_size = (sample_rate / 100); | |
42 #elif defined(OS_LINUX) || defined(OS_OPENBSD) | |
43 // Based on tests using the current ALSA implementation in Chrome, we have | |
44 // found that the best combination is 20ms on the input side and 10ms on the | |
45 // output side. | |
46 buffer_size = 2 * sample_rate / 100; | |
47 #elif defined(OS_ANDROID) | |
48 // TODO(leozwang): Tune and adjust buffer size on Android. | |
49 buffer_size = 2 * sample_rate / 100; | |
50 #endif | |
51 return buffer_size; | |
52 } | |
53 | |
54 } // namespace | 37 } // namespace |
55 | 38 |
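For context on the helper deleted above: GetBufferSizeForSampleRate() converted the input sample rate into a frame count for a fixed buffer duration, 10 ms on Windows/Mac and 20 ms on Linux/OpenBSD/Android. A minimal standalone sketch of that arithmetic (plain C++, no Chromium dependencies; the function name here is illustrative):

    #include <cstdio>

    // Frames needed to hold |duration_ms| milliseconds of audio at |sample_rate|.
    int FramesForDuration(int sample_rate, int duration_ms) {
      return sample_rate * duration_ms / 1000;
    }

    int main() {
      std::printf("48000 Hz, 10 ms: %d frames\n", FramesForDuration(48000, 10));  // 480
      std::printf("44100 Hz, 20 ms: %d frames\n", FramesForDuration(44100, 20));  // 882
      return 0;
    }
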
56 // This is a temporary audio buffer with parameters used to send data to | |
57 // callbacks. | |
58 class WebRtcAudioCapturer::ConfiguredBuffer : | |
59 public base::RefCounted<WebRtcAudioCapturer::ConfiguredBuffer> { | |
60 public: | |
61 ConfiguredBuffer() {} | |
62 | |
63 bool Initialize(int sample_rate, | |
64 media::ChannelLayout channel_layout) { | |
65 int buffer_size = GetBufferSizeForSampleRate(sample_rate); | |
66 DVLOG(1) << "Using WebRTC input buffer size: " << buffer_size; | |
67 | |
68 media::AudioParameters::Format format = | |
69 media::AudioParameters::AUDIO_PCM_LOW_LATENCY; | |
70 | |
71 // bits_per_sample is always 16 for now. | |
72 int bits_per_sample = 16; | |
73 int channels = ChannelLayoutToChannelCount(channel_layout); | |
74 params_.Reset(format, channel_layout, channels, 0, | |
75 sample_rate, bits_per_sample, buffer_size); | |
76 buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]); | |
77 | |
78 return true; | |
79 } | |
80 | |
81 int16* buffer() const { return buffer_.get(); } | |
82 const media::AudioParameters& params() const { return params_; } | |
83 | |
84 private: | |
85 ~ConfiguredBuffer() {} | |
86 friend class base::RefCounted<WebRtcAudioCapturer::ConfiguredBuffer>; | |
87 | |
88 scoped_ptr<int16[]> buffer_; | |
89 | |
90 // Cached values of utilized audio parameters. | |
91 media::AudioParameters params_; | |
92 }; | |
93 | |
94 // Reference counted container of WebRtcLocalAudioTrack delegate. | 39 // Reference counted container of WebRtcLocalAudioTrack delegate. |
95 class WebRtcAudioCapturer::TrackOwner | 40 class WebRtcAudioCapturer::TrackOwner |
96 : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> { | 41 : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> { |
97 public: | 42 public: |
98 explicit TrackOwner(WebRtcLocalAudioTrack* track) | 43 explicit TrackOwner(WebRtcLocalAudioTrack* track) |
99 : delegate_(track) {} | 44 : delegate_(track) {} |
100 | 45 |
101 void CaptureData(const int16* audio_data, | 46 void Capture(media::AudioBus* audio_source, |
102 int number_of_channels, | 47 int audio_delay_milliseconds, |
103 int number_of_frames, | 48 double volume, |
104 int audio_delay_milliseconds, | 49 bool key_pressed) { |
105 int volume, | |
106 bool key_pressed) { | |
107 base::AutoLock lock(lock_); | 50 base::AutoLock lock(lock_); |
108 if (delegate_) { | 51 if (delegate_) { |
109 delegate_->CaptureData(audio_data, | 52 delegate_->Capture(audio_source, |
110 number_of_channels, | 53 audio_delay_milliseconds, |
111 number_of_frames, | 54 volume, |
112 audio_delay_milliseconds, | 55 key_pressed); |
113 volume, | |
114 key_pressed); | |
115 } | 56 } |
116 } | 57 } |
117 | 58 |
118 void SetCaptureFormat(const media::AudioParameters& params) { | 59 void SetCaptureFormat(const media::AudioParameters& params) { |
119 base::AutoLock lock(lock_); | 60 base::AutoLock lock(lock_); |
120 if (delegate_) | 61 if (delegate_) |
121 delegate_->SetCaptureFormat(params); | 62 delegate_->SetCaptureFormat(params); |
122 } | 63 } |
123 | 64 |
124 void Reset() { | 65 void Reset() { |
(...skipping 29 matching lines...) |
154 | 95 |
155 DISALLOW_COPY_AND_ASSIGN(TrackOwner); | 96 DISALLOW_COPY_AND_ASSIGN(TrackOwner); |
156 }; | 97 }; |
157 | 98 |
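TrackOwner above exists so the capturer can fan out to tracks without racing their destruction: every forwarded call takes the lock and checks the delegate pointer, and Reset() clears it under the same lock. A generic sketch of that guard pattern (plain C++11 with std::mutex standing in for base::Lock; names are illustrative, not Chromium's):

    #include <mutex>

    // Calls forwarded through the owner are dropped once Reset() has detached
    // the sink, so a destroyed sink is never dereferenced from the capture
    // thread.
    class AudioSink {
     public:
      virtual ~AudioSink() {}
      virtual void OnData(const float* frames, int frame_count) = 0;
    };

    class SinkOwner {
     public:
      explicit SinkOwner(AudioSink* sink) : sink_(sink) {}

      void OnData(const float* frames, int frame_count) {
        std::lock_guard<std::mutex> lock(lock_);
        if (sink_)
          sink_->OnData(frames, frame_count);
      }

      void Reset() {
        std::lock_guard<std::mutex> lock(lock_);
        sink_ = nullptr;
      }

     private:
      std::mutex lock_;
      AudioSink* sink_;
    };
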
158 // static | 99 // static |
159 scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() { | 100 scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() { |
160 scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer(); | 101 scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer(); |
161 return capturer; | 102 return capturer; |
162 } | 103 } |
163 | 104 |
164 bool WebRtcAudioCapturer::Reconfigure(int sample_rate, | 105 void WebRtcAudioCapturer::Reconfigure(int sample_rate, |
165 media::ChannelLayout channel_layout) { | 106 media::ChannelLayout channel_layout) { |
166 scoped_refptr<ConfiguredBuffer> new_buffer(new ConfiguredBuffer()); | 107 DCHECK(thread_checker_.CalledOnValidThread()); |
167 if (!new_buffer->Initialize(sample_rate, channel_layout)) | 108 int buffer_size = GetBufferSize(sample_rate); |
168 return false; | 109 DVLOG(1) << "Using WebRTC input buffer size: " << buffer_size; |
| 110 |
| 111 media::AudioParameters::Format format = |
| 112 media::AudioParameters::AUDIO_PCM_LOW_LATENCY; |
| 113 |
| 114 // bits_per_sample is always 16 for now. |
| 115 int bits_per_sample = 16; |
| 116 media::AudioParameters params(format, channel_layout, sample_rate, |
| 117 bits_per_sample, buffer_size); |
169 | 118 |
170 TrackList tracks; | 119 TrackList tracks; |
171 { | 120 { |
172 base::AutoLock auto_lock(lock_); | 121 base::AutoLock auto_lock(lock_); |
173 | |
174 buffer_ = new_buffer; | |
175 tracks = tracks_; | 122 tracks = tracks_; |
| 123 params_ = params; |
176 } | 124 } |
177 | 125 |
178 // Tell all audio_tracks which format we use. | 126 // Tell all audio_tracks which format we use. |
179 for (TrackList::const_iterator it = tracks.begin(); | 127 for (TrackList::const_iterator it = tracks.begin(); |
180 it != tracks.end(); ++it) | 128 it != tracks.end(); ++it) |
181 (*it)->SetCaptureFormat(new_buffer->params()); | 129 (*it)->SetCaptureFormat(params); |
182 | |
183 return true; | |
184 } | 130 } |
185 | 131 |
186 bool WebRtcAudioCapturer::Initialize(int render_view_id, | 132 bool WebRtcAudioCapturer::Initialize(int render_view_id, |
187 media::ChannelLayout channel_layout, | 133 media::ChannelLayout channel_layout, |
188 int sample_rate, | 134 int sample_rate, |
| 135 int buffer_size, |
189 int session_id, | 136 int session_id, |
190 const std::string& device_id) { | 137 const std::string& device_id) { |
191 DCHECK(thread_checker_.CalledOnValidThread()); | 138 DCHECK(thread_checker_.CalledOnValidThread()); |
| 139 DCHECK_GE(render_view_id, 0); |
192 DVLOG(1) << "WebRtcAudioCapturer::Initialize()"; | 140 DVLOG(1) << "WebRtcAudioCapturer::Initialize()"; |
193 | 141 |
194 DVLOG(1) << "Audio input hardware channel layout: " << channel_layout; | 142 DVLOG(1) << "Audio input hardware channel layout: " << channel_layout; |
195 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout", | 143 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout", |
196 channel_layout, media::CHANNEL_LAYOUT_MAX); | 144 channel_layout, media::CHANNEL_LAYOUT_MAX); |
197 | 145 |
| 146 render_view_id_ = render_view_id; |
198 session_id_ = session_id; | 147 session_id_ = session_id; |
199 device_id_ = device_id; | 148 device_id_ = device_id; |
| 149 hardware_buffer_size_ = buffer_size; |
| 150 |
200 if (render_view_id == -1) { | 151 if (render_view_id == -1) { |
201 // This capturer is used by WebAudio, return true without creating a | 152 // Return true here to allow injecting a new source via SetCapturerSource() |
202 // default capturing source. WebAudio will inject its own source via | 153 // at a later state. |
203 // SetCapturerSource() at a later state. | |
204 DCHECK(device_id.empty()); | |
205 return true; | 154 return true; |
206 } | 155 } |
207 | 156 |
208 // Verify that the reported input channel configuration is supported. | 157 // Verify that the reported input channel configuration is supported. |
209 if (channel_layout != media::CHANNEL_LAYOUT_MONO && | 158 if (channel_layout != media::CHANNEL_LAYOUT_MONO && |
210 channel_layout != media::CHANNEL_LAYOUT_STEREO) { | 159 channel_layout != media::CHANNEL_LAYOUT_STEREO) { |
211 DLOG(ERROR) << channel_layout | 160 DLOG(ERROR) << channel_layout |
212 << " is not a supported input channel configuration."; | 161 << " is not a supported input channel configuration."; |
213 return false; | 162 return false; |
214 } | 163 } |
(...skipping 10 matching lines...) |
225 // Verify that the reported input hardware sample rate is supported | 174 // Verify that the reported input hardware sample rate is supported |
226 // on the current platform. | 175 // on the current platform. |
227 if (std::find(&kValidInputRates[0], | 176 if (std::find(&kValidInputRates[0], |
228 &kValidInputRates[0] + arraysize(kValidInputRates), | 177 &kValidInputRates[0] + arraysize(kValidInputRates), |
229 sample_rate) == | 178 sample_rate) == |
230 &kValidInputRates[arraysize(kValidInputRates)]) { | 179 &kValidInputRates[arraysize(kValidInputRates)]) { |
231 DLOG(ERROR) << sample_rate << " is not a supported input rate."; | 180 DLOG(ERROR) << sample_rate << " is not a supported input rate."; |
232 return false; | 181 return false; |
233 } | 182 } |
234 | 183 |
235 if (!Reconfigure(sample_rate, channel_layout)) | 184 Reconfigure(sample_rate, channel_layout); |
236 return false; | |
237 | 185 |
238 // Create and configure the default audio capturing source. The |source_| | 186 // Create and configure the default audio capturing source. The |source_| |
239 // will be overwritten if an external client later calls SetCapturerSource() | 187 // will be overwritten if an external client later calls SetCapturerSource() |
240 // providing an alternative media::AudioCapturerSource. | 188 // providing an alternative media::AudioCapturerSource. |
241 SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), | 189 SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), |
242 channel_layout, | 190 channel_layout, |
243 static_cast<float>(sample_rate)); | 191 static_cast<float>(sample_rate)); |
244 | 192 |
245 return true; | 193 return true; |
246 } | 194 } |
247 | 195 |
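The sample-rate validation in Initialize() is a linear search over kValidInputRates; only rates listed in the per-platform table are accepted. An equivalent standalone sketch of that idiom, using std::begin/std::end in place of arraysize() (illustrative, not the Chromium helper):

    #include <algorithm>
    #include <cstdio>
    #include <iterator>

    const int kValidInputRates[] = {96000, 48000, 44100, 32000, 16000, 8000};

    bool IsSupportedInputRate(int sample_rate) {
      return std::find(std::begin(kValidInputRates), std::end(kValidInputRates),
                       sample_rate) != std::end(kValidInputRates);
    }

    int main() {
      std::printf("48000 supported: %d\n", IsSupportedInputRate(48000));  // 1
      std::printf("22050 supported: %d\n", IsSupportedInputRate(22050));  // 0
      return 0;
    }
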
248 WebRtcAudioCapturer::WebRtcAudioCapturer() | 196 WebRtcAudioCapturer::WebRtcAudioCapturer() |
249 : source_(NULL), | 197 : source_(NULL), |
250 running_(false), | 198 running_(false), |
251 agc_is_enabled_(false), | 199 agc_is_enabled_(false), |
| 200 render_view_id_(-1), |
| 201 hardware_buffer_size_(0), |
252 session_id_(0), | 202 session_id_(0), |
253 volume_(0) { | 203 volume_(0), |
| 204 source_provider_(new WebRtcLocalAudioSourceProvider()), |
| 205 peer_connection_mode_(false) { |
| 206 DCHECK(source_provider_.get()); |
254 DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()"; | 207 DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()"; |
255 } | 208 } |
256 | 209 |
257 WebRtcAudioCapturer::~WebRtcAudioCapturer() { | 210 WebRtcAudioCapturer::~WebRtcAudioCapturer() { |
258 DCHECK(thread_checker_.CalledOnValidThread()); | 211 DCHECK(thread_checker_.CalledOnValidThread()); |
259 DCHECK(tracks_.empty()); | 212 DCHECK(tracks_.empty()); |
260 DCHECK(!running_); | 213 DCHECK(!running_); |
261 DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()"; | 214 DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()"; |
262 } | 215 } |
263 | 216 |
264 void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) { | 217 void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) { |
265 DCHECK(track); | 218 DCHECK(track); |
266 DVLOG(1) << "WebRtcAudioCapturer::AddTrack()"; | 219 DVLOG(1) << "WebRtcAudioCapturer::AddTrack()"; |
267 | 220 |
268 // Start the source if the first audio track is connected to the capturer. | 221 // Start the source if the first audio track is connected to the capturer. |
269 // Start() will do nothing if the capturer has already been started. | 222 // Start() will do nothing if the capturer has already been started. |
270 Start(); | 223 Start(); |
271 | 224 |
272 base::AutoLock auto_lock(lock_); | 225 base::AutoLock auto_lock(lock_); |
273 // Verify that |track| is not already added to the list. | 226 // Verify that |track| is not already added to the list. |
274 DCHECK(std::find_if(tracks_.begin(), tracks_.end(), | 227 DCHECK(std::find_if(tracks_.begin(), tracks_.end(), |
275 TrackOwner::TrackWrapper(track)) == tracks_.end()); | 228 TrackOwner::TrackWrapper(track)) == tracks_.end()); |
276 | 229 |
277 if (buffer_.get()) { | 230 track->SetCaptureFormat(params_); |
278 track->SetCaptureFormat(buffer_->params()); | |
279 } | |
280 | |
281 tracks_.push_back(new WebRtcAudioCapturer::TrackOwner(track)); | 231 tracks_.push_back(new WebRtcAudioCapturer::TrackOwner(track)); |
282 } | 232 } |
283 | 233 |
284 void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) { | 234 void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) { |
285 DCHECK(thread_checker_.CalledOnValidThread()); | 235 DCHECK(thread_checker_.CalledOnValidThread()); |
286 | 236 |
287 bool stop_source = false; | 237 bool stop_source = false; |
288 { | 238 { |
289 base::AutoLock auto_lock(lock_); | 239 base::AutoLock auto_lock(lock_); |
290 // Get iterator to the first element for which WrapsSink(track) returns | 240 // Get iterator to the first element for which WrapsSink(track) returns |
(...skipping 17 matching lines...) |
308 } | 258 } |
309 | 259 |
310 void WebRtcAudioCapturer::SetCapturerSource( | 260 void WebRtcAudioCapturer::SetCapturerSource( |
311 const scoped_refptr<media::AudioCapturerSource>& source, | 261 const scoped_refptr<media::AudioCapturerSource>& source, |
312 media::ChannelLayout channel_layout, | 262 media::ChannelLayout channel_layout, |
313 float sample_rate) { | 263 float sample_rate) { |
314 DCHECK(thread_checker_.CalledOnValidThread()); | 264 DCHECK(thread_checker_.CalledOnValidThread()); |
315 DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," | 265 DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," |
316 << "sample_rate=" << sample_rate << ")"; | 266 << "sample_rate=" << sample_rate << ")"; |
317 scoped_refptr<media::AudioCapturerSource> old_source; | 267 scoped_refptr<media::AudioCapturerSource> old_source; |
318 scoped_refptr<ConfiguredBuffer> current_buffer; | |
319 bool restart_source = false; | 268 bool restart_source = false; |
320 { | 269 { |
321 base::AutoLock auto_lock(lock_); | 270 base::AutoLock auto_lock(lock_); |
322 if (source_.get() == source.get()) | 271 if (source_.get() == source.get()) |
323 return; | 272 return; |
324 | 273 |
325 source_.swap(old_source); | 274 source_.swap(old_source); |
326 source_ = source; | 275 source_ = source; |
327 current_buffer = buffer_; | |
328 | 276 |
329 // Reset the flag to allow starting the new source. | 277 // Reset the flag to allow starting the new source. |
330 restart_source = running_; | 278 restart_source = running_; |
331 running_ = false; | 279 running_ = false; |
332 } | 280 } |
333 | 281 |
334 const bool no_default_audio_source_exists = !current_buffer.get(); | 282 DVLOG(1) << "Switching to a new capture source."; |
| 283 if (old_source.get()) |
| 284 old_source->Stop(); |
335 | 285 |
336 // Detach the old source from normal recording or perform first-time | 286 // Dispatch the new parameters both to the sink(s) and to the new source. |
337 // initialization if Initialize() has never been called. For the second | 287 // The idea is to get rid of any dependency of the microphone parameters |
338 // case, the caller is not "taking over an ongoing session" but instead | 288 // which would normally be used by default. |
339 // "taking control over a new session". | 289 Reconfigure(sample_rate, channel_layout); |
340 if (old_source.get() || no_default_audio_source_exists) { | |
341 DVLOG(1) << "New capture source will now be utilized."; | |
342 if (old_source.get()) | |
343 old_source->Stop(); | |
344 | 290 |
345 // Dispatch the new parameters both to the sink(s) and to the new source. | 291 // Make sure to grab the new parameters in case they were reconfigured. |
346 // The idea is to get rid of any dependency of the microphone parameters | 292 media::AudioParameters params = audio_parameters(); |
347 // which would normally be used by default. | 293 source_provider_->Initialize(params); |
348 if (!Reconfigure(sample_rate, channel_layout)) { | 294 if (source.get()) |
349 return; | 295 source->Initialize(params, this, session_id_); |
350 } else { | |
351 // The buffer has been reconfigured. Update |current_buffer|. | |
352 base::AutoLock auto_lock(lock_); | |
353 current_buffer = buffer_; | |
354 } | |
355 } | |
356 | |
357 if (source.get()) { | |
358 // Make sure to grab the new parameters in case they were reconfigured. | |
359 source->Initialize(current_buffer->params(), this, session_id_); | |
360 } | |
361 | 296 |
362 if (restart_source) | 297 if (restart_source) |
363 Start(); | 298 Start(); |
364 } | 299 } |
365 | 300 |
| 301 void WebRtcAudioCapturer::EnablePeerConnectionMode() { |
| 302 DCHECK(thread_checker_.CalledOnValidThread()); |
| 303 DVLOG(1) << "EnablePeerConnectionMode"; |
| 304 // Do nothing if the peer connection mode has been enabled. |
| 305 if (peer_connection_mode_) |
| 306 return; |
| 307 |
| 308 peer_connection_mode_ = true; |
| 309 int render_view_id = -1; |
| 310 { |
| 311 base::AutoLock auto_lock(lock_); |
| 312 // Simply return if there is no existing source or the |render_view_id_| is |
| 313 // not valid. |
| 314 if (!source_.get() || render_view_id_== -1) |
| 315 return; |
| 316 |
| 317 render_view_id = render_view_id_; |
| 318 } |
| 319 |
| 320 // Create a new audio stream as source which will open the hardware using |
| 321 // WebRtc native buffer size. |
| 322 media::AudioParameters params = audio_parameters(); |
| 323 SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), |
| 324 params.channel_layout(), |
| 325 static_cast<float>(params.sample_rate())); |
| 326 } |
| 327 |
366 void WebRtcAudioCapturer::Start() { | 328 void WebRtcAudioCapturer::Start() { |
367 DVLOG(1) << "WebRtcAudioCapturer::Start()"; | 329 DVLOG(1) << "WebRtcAudioCapturer::Start()"; |
368 base::AutoLock auto_lock(lock_); | 330 base::AutoLock auto_lock(lock_); |
369 if (running_) | 331 if (running_) |
370 return; | 332 return; |
371 | 333 |
372 // Start the data source, i.e., start capturing data from the current source. | 334 // Start the data source, i.e., start capturing data from the current source. |
373 // Note that, the source does not have to be a microphone. | 335 // Note that, the source does not have to be a microphone. |
374 if (source_.get()) { | 336 if (source_.get()) { |
375 // We need to set the AGC control before starting the stream. | 337 // We need to set the AGC control before starting the stream. |
(...skipping 60 matching lines...) |
436 #elif defined(OS_LINUX) || defined(OS_OPENBSD) | 398 #elif defined(OS_LINUX) || defined(OS_OPENBSD) |
437 // We have a special situation on Linux where the microphone volume can be | 399 // We have a special situation on Linux where the microphone volume can be |
438 // "higher than maximum". The input volume slider in the sound preference | 400 // "higher than maximum". The input volume slider in the sound preference |
439 // allows the user to set a scaling that is higher than 100%. It means that | 401 // allows the user to set a scaling that is higher than 100%. It means that |
440 // even if the reported maximum levels is N, the actual microphone level can | 402 // even if the reported maximum levels is N, the actual microphone level can |
441 // go up to 1.5x*N and that corresponds to a normalized |volume| of 1.5x. | 403 // go up to 1.5x*N and that corresponds to a normalized |volume| of 1.5x. |
442 DCHECK_LE(volume, 1.6); | 404 DCHECK_LE(volume, 1.6); |
443 #endif | 405 #endif |
444 | 406 |
445 TrackList tracks; | 407 TrackList tracks; |
446 scoped_refptr<ConfiguredBuffer> buffer_ref_while_calling; | 408 int current_volume = 0; |
447 { | 409 { |
448 base::AutoLock auto_lock(lock_); | 410 base::AutoLock auto_lock(lock_); |
449 if (!running_) | 411 if (!running_) |
450 return; | 412 return; |
451 | 413 |
452 // Map internal volume range of [0.0, 1.0] into [0, 255] used by the | 414 // Map internal volume range of [0.0, 1.0] into [0, 255] used by the |
453 // webrtc::VoiceEngine. webrtc::VoiceEngine will handle the case when the | 415 // webrtc::VoiceEngine. webrtc::VoiceEngine will handle the case when the |
454 // volume is higher than 255. | 416 // volume is higher than 255. |
455 volume_ = static_cast<int>((volume * MaxVolume()) + 0.5); | 417 volume_ = static_cast<int>((volume * MaxVolume()) + 0.5); |
456 | 418 current_volume = volume_; |
457 // Copy the stuff we will need to local variables. In particular, we grab | |
458 // a reference to the buffer so we can ensure it stays alive even if the | |
459 // buffer is reconfigured while we are calling back. | |
460 buffer_ref_while_calling = buffer_; | |
461 tracks = tracks_; | 419 tracks = tracks_; |
462 } | 420 } |
463 | 421 |
464 int bytes_per_sample = | 422 // Deliver captured data to source provider, which stores the data into FIFO |
465 buffer_ref_while_calling->params().bits_per_sample() / 8; | 423 // for WebAudio to fetch. |
466 | 424 source_provider_->DeliverData(audio_source, audio_delay_milliseconds, |
467 // Interleave, scale, and clip input to int and store result in | 425 current_volume, key_pressed); |
468 // a local byte buffer. | |
469 audio_source->ToInterleaved(audio_source->frames(), bytes_per_sample, | |
470 buffer_ref_while_calling->buffer()); | |
471 | 426 |
472 // Feed the data to the tracks. | 427 // Feed the data to the tracks. |
473 for (TrackList::const_iterator it = tracks.begin(); | 428 for (TrackList::const_iterator it = tracks.begin(); |
474 it != tracks.end(); | 429 it != tracks.end(); |
475 ++it) { | 430 ++it) { |
476 (*it)->CaptureData(buffer_ref_while_calling->buffer(), | 431 (*it)->Capture(audio_source, audio_delay_milliseconds, |
477 audio_source->channels(), | 432 current_volume, key_pressed); |
478 audio_source->frames(), | |
479 audio_delay_milliseconds, | |
480 volume, | |
481 key_pressed); | |
482 } | 433 } |
483 } | 434 } |
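The volume handling in Capture() maps the normalized [0.0, 1.0] level into the [0, 255] range used by webrtc::VoiceEngine, rounding to the nearest integer by adding 0.5 before the cast. A tiny sketch of that mapping, assuming MaxVolume() reports 255 as the surrounding comment indicates:

    #include <cstdio>

    // Normalized volume -> webrtc::VoiceEngine volume, rounded to nearest.
    int ToWebRtcVolume(double normalized_volume, int max_volume) {
      return static_cast<int>(normalized_volume * max_volume + 0.5);
    }

    int main() {
      std::printf("%d\n", ToWebRtcVolume(0.0, 255));  // 0
      std::printf("%d\n", ToWebRtcVolume(0.5, 255));  // 128
      std::printf("%d\n", ToWebRtcVolume(1.0, 255));  // 255
      std::printf("%d\n", ToWebRtcVolume(1.5, 255));  // 383 (Linux input can exceed 1.0)
      return 0;
    }
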
484 | 435 |
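On the removed path, Capture() first interleaved the planar float AudioBus into a local int16 buffer via ToInterleaved() and handed the tracks raw pointers; the new path passes the AudioBus itself to the source provider and the tracks. A rough standalone sketch of what planar-float-to-interleaved-int16 conversion involves (a simplified stand-in, not media::AudioBus::ToInterleaved() itself):

    #include <cstdint>
    #include <vector>

    // Clip each sample to [-1.0, 1.0], scale to int16, and interleave channels:
    // output layout is frame0{ch0, ch1, ...}, frame1{ch0, ch1, ...}, ...
    std::vector<int16_t> InterleaveToInt16(
        const std::vector<std::vector<float> >& planar_channels, int frames) {
      const int channels = static_cast<int>(planar_channels.size());
      std::vector<int16_t> interleaved(static_cast<size_t>(frames) * channels);
      for (int f = 0; f < frames; ++f) {
        for (int c = 0; c < channels; ++c) {
          float s = planar_channels[c][f];
          if (s > 1.0f) s = 1.0f;
          if (s < -1.0f) s = -1.0f;
          interleaved[f * channels + c] = static_cast<int16_t>(s * 32767.0f);
        }
      }
      return interleaved;
    }
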
485 void WebRtcAudioCapturer::OnCaptureError() { | 436 void WebRtcAudioCapturer::OnCaptureError() { |
486 NOTIMPLEMENTED(); | 437 NOTIMPLEMENTED(); |
487 } | 438 } |
488 | 439 |
489 media::AudioParameters WebRtcAudioCapturer::audio_parameters() const { | 440 media::AudioParameters WebRtcAudioCapturer::audio_parameters() const { |
490 base::AutoLock auto_lock(lock_); | 441 base::AutoLock auto_lock(lock_); |
491 // |buffer_| can be NULL when SetCapturerSource() or Initialize() has not | 442 return params_; |
492 // been called. | 443 } |
493 return buffer_.get() ? buffer_->params() : media::AudioParameters(); | 444 |
| 445 int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const { |
| 446 DCHECK(thread_checker_.CalledOnValidThread()); |
| 447 #if defined(OS_ANDROID) |
| 448 // TODO(henrika): Tune and adjust buffer size on Android. |
| 449 return (2 * sample_rate / 100); |
| 450 #endif |
| 451 |
| 452 // Use the native hardware buffer size in non peer connection mode. |
| 453 if (!peer_connection_mode_ && hardware_buffer_size_) |
| 454 return hardware_buffer_size_; |
| 455 |
| 456 // WebRtc is running at a buffer size of 10ms data. Use a multiple of 10ms |
| 457 // as the buffer size to achieve the best performance for WebRtc. |
| 458 return (sample_rate / 100); |
494 } | 459 } |
495 | 460 |
496 } // namespace content | 461 } // namespace content |
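The new GetBufferSize() prefers the native hardware buffer size when the capturer is not in peer-connection mode, and otherwise falls back to 10 ms worth of frames (sample_rate / 100) to line up with WebRtc's 10 ms processing blocks. A compact sketch of that policy, omitting the Android special case (names here are illustrative):

    #include <cstdio>

    // Hardware buffer size outside peer-connection mode; otherwise 10 ms of audio.
    int ChooseBufferSize(int sample_rate, bool peer_connection_mode,
                         int hardware_buffer_size) {
      if (!peer_connection_mode && hardware_buffer_size)
        return hardware_buffer_size;
      return sample_rate / 100;
    }

    int main() {
      std::printf("%d\n", ChooseBufferSize(48000, true, 440));   // 480
      std::printf("%d\n", ChooseBufferSize(44100, false, 440));  // 440
      return 0;
    }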