Chromium Code Reviews| Index: content/renderer/media/renderer_webaudiodevice_impl.cc |
| diff --git a/content/renderer/media/renderer_webaudiodevice_impl.cc b/content/renderer/media/renderer_webaudiodevice_impl.cc |
| index a9fba3b4e21ec8f451330dc4d22977766a1eed8c..414175ae07f0a1078969676eb79d4a8779e74f59 100644 |
| --- a/content/renderer/media/renderer_webaudiodevice_impl.cc |
| +++ b/content/renderer/media/renderer_webaudiodevice_impl.cc |
| @@ -17,6 +17,7 @@ |
| #include "content/renderer/media/audio_device_factory.h" |
| #include "content/renderer/render_frame_impl.h" |
| #include "media/audio/null_audio_sink.h" |
| +#include "media/base/audio_pull_fifo.h" |
| #include "media/base/media_switches.h" |
| #include "third_party/WebKit/public/web/WebLocalFrame.h" |
| #include "third_party/WebKit/public/web/WebView.h" |
| @@ -26,6 +27,30 @@ using blink::WebLocalFrame; |
| using blink::WebVector; |
| using blink::WebView; |
namespace {

// Returns the render buffer size (in frames) WebAudio should use, given the
// sink's preferred buffer size |default_sink_frames_per_buffer|.
//
// NOTE(review): the name contains a typo ("Sizer" should be "Size"); it is
// kept unchanged so existing callers continue to compile.
int CalculateRenderBufferSizer(int default_sink_frames_per_buffer) {
#if defined(OS_ANDROID)
  // The optimum low-latency hardware buffer size is usually too small on
  // Android for WebAudio to render without glitching. So, if it is small, use
  // a larger size.
  //
  // Since WebAudio renders in 128-frame blocks, small buffer sizes (e.g. 144
  // for a Galaxy Nexus) cause significant processing jitter: sometimes
  // multiple blocks will be processed in one callback, but other times none
  // will be, since WebAudio can't satisfy the request. By using a larger
  // render buffer size, we smooth out the jitter.
  constexpr int kSmallBufferSize = 1024;
  constexpr int kDefaultCallbackBufferSize = 2048;
  if (default_sink_frames_per_buffer <= kSmallBufferSize)
    return kDefaultCallbackBufferSize;
#endif

  return default_sink_frames_per_buffer;
}

}  // namespace
| + |
| namespace content { |
| #if defined(OS_ANDROID) |
| @@ -37,18 +62,16 @@ RendererWebAudioDeviceImpl::RendererWebAudioDeviceImpl( |
| WebAudioDevice::RenderCallback* callback, |
| int session_id, |
| const url::Origin& security_origin) |
| - : params_(params), |
| + : client_params_(params), |
| client_callback_(callback), |
| session_id_(session_id), |
| task_runner_(base::ThreadTaskRunnerHandle::Get()), |
| null_audio_sink_(new media::NullAudioSink(task_runner_)), |
| is_using_null_audio_sink_(false), |
| - first_buffer_after_silence_(media::AudioBus::Create(params_)), |
| + first_buffer_after_silence_(media::AudioBus::Create(client_params_)), |
| is_first_buffer_after_silence_(false), |
| security_origin_(security_origin) { |
| DCHECK(client_callback_); |
| - null_audio_sink_->Initialize(params_, this); |
| - null_audio_sink_->Start(); |
| } |
| RendererWebAudioDeviceImpl::~RendererWebAudioDeviceImpl() { |
| @@ -75,11 +98,22 @@ void RendererWebAudioDeviceImpl::start() { |
| AudioDeviceFactory::kSourceWebAudio, |
| render_frame ? render_frame->GetRoutingID() : MSG_ROUTING_NONE, |
| session_id_, std::string(), security_origin_); |
| - sink_->Initialize(params_, this); |
| - sink_->Start(); |
| - sink_->Play(); |
| + |
| + // Output based on the default sink buffer size. |
| + media::AudioParameters output_params(client_params_); |
| + output_params.set_frames_per_buffer(CalculateRenderBufferSizer( |
| + sink_->GetOutputDeviceInfo().output_params().frames_per_buffer())); |
| + |
| + CreateFifoIfRequired(output_params.frames_per_buffer()); |
| + |
| + null_audio_sink_->Initialize(output_params, this); |
| + null_audio_sink_->Start(); |
| start_null_audio_sink_callback_.Reset( |
| base::Bind(&media::NullAudioSink::Play, null_audio_sink_)); |
|
hongchan
2016/05/26 16:43:33
I am ignorant on the renderer code, but why do we
o1ka
2016/05/26 16:47:47
It was introduced in this CL https://codereview.chr
|
| + |
| + sink_->Initialize(output_params, this); |
| + sink_->Start(); |
| + sink_->Play(); |
| // Note: Default behavior is to auto-play on start. |
| } |
| @@ -97,18 +131,37 @@ void RendererWebAudioDeviceImpl::stop() { |
| } |
| double RendererWebAudioDeviceImpl::sampleRate() { |
| - return params_.sample_rate(); |
| + return client_params_.sample_rate(); |
| } |
| int RendererWebAudioDeviceImpl::Render(media::AudioBus* dest, |
| uint32_t frames_delayed, |
| uint32_t frames_skipped) { |
| #if defined(OS_ANDROID) |
| + // There can be a race in Render() on Android (https://crbug.com/614978), so |
| + // don't try to inject the FIFO dynamically, just rely on the initialization. |
| + DCHECK(audio_fifo_ || (dest->frames() == client_params_.frames_per_buffer())); |
| +#else |
| + // Allow Render() to work on varying buffer size. |
| + CreateFifoIfRequired(dest->frames()); |
| +#endif |
| + |
| + if (audio_fifo_) |
| + audio_fifo_->Consume(dest, dest->frames()); |
| + else |
| + SourceCallback(0, dest); |
| + |
| + return dest->frames(); |
| +} |
| + |
| +void RendererWebAudioDeviceImpl::SourceCallback(int fifo_frame_delay, |
| + media::AudioBus* dest) { |
| +#if defined(OS_ANDROID) |
| if (is_first_buffer_after_silence_) { |
| DCHECK(!is_using_null_audio_sink_); |
| first_buffer_after_silence_->CopyTo(dest); |
| is_first_buffer_after_silence_ = false; |
| - return dest->frames(); |
| + return; |
| } |
| #endif |
| // Wrap the output pointers using WebVector. |
| @@ -140,7 +193,7 @@ int RendererWebAudioDeviceImpl::Render(media::AudioBus* dest, |
| // Calling sink_->Play() may trigger reentrancy into this |
| // function, so this should be called at the end. |
| sink_->Play(); |
| - return dest->frames(); |
| + return; |
| } |
| } else if (!is_using_null_audio_sink_) { |
| // Called on the audio device thread. |
| @@ -153,18 +206,30 @@ int RendererWebAudioDeviceImpl::Render(media::AudioBus* dest, |
| is_using_null_audio_sink_ = true; |
| // If Stop() is called right after the task is posted, need to cancel |
| // this task. |
| - task_runner_->PostDelayedTask( |
| - FROM_HERE, |
| - start_null_audio_sink_callback_.callback(), |
| - params_.GetBufferDuration()); |
| + task_runner_->PostDelayedTask(FROM_HERE, |
| + start_null_audio_sink_callback_.callback(), |
| + client_params_.GetBufferDuration()); |
| } |
| } |
| #endif |
| - return dest->frames(); |
| } |
// Sink error notification; presumably part of the render-callback interface
// implemented alongside Render() — confirm against the header. Intentionally
// a no-op for now.
void RendererWebAudioDeviceImpl::OnRenderError() {
  // TODO(crogers): implement error handling.
}
| +void RendererWebAudioDeviceImpl::CreateFifoIfRequired( |
| + int render_frames_per_buffer) { |
| + if (!audio_fifo_ && |
| + render_frames_per_buffer != client_params_.frames_per_buffer()) { |
| + audio_fifo_.reset(new media::AudioPullFifo( |
| + client_params_.channels(), client_params_.frames_per_buffer(), |
| + base::Bind(&RendererWebAudioDeviceImpl::SourceCallback, |
| + base::Unretained(this)))); |
| + DVLOG(1) << "Client buffer size: " << client_params_.frames_per_buffer() |
| + << " output buffer size: " << render_frames_per_buffer |
| + << "; fifo injected."; |
| + } |
| +} |
| + |
| } // namespace content |