OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/audio_renderer_base.h" | 5 #include "media/filters/audio_renderer_base.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <math.h> |
8 #include <string> | |
9 | 8 |
10 #include "base/bind.h" | 9 #include "base/bind.h" |
11 #include "base/callback.h" | 10 #include "base/callback.h" |
12 #include "base/callback_helpers.h" | 11 #include "base/callback_helpers.h" |
13 #include "base/logging.h" | 12 #include "base/logging.h" |
14 #include "media/base/filter_host.h" | 13 #include "media/base/filter_host.h" |
14 #include "media/audio/audio_util.h" | |
15 | 15 |
16 namespace media { | 16 namespace media { |
17 | 17 |
// Constructs the renderer in the kUninitialized state.  |sink| is the audio
// output abstraction decoded data is eventually pushed to; it is stored here
// but not initialized or started until Initialize().
AudioRendererBase::AudioRendererBase(media::AudioRendererSink* sink)
    : state_(kUninitialized),
      pending_read_(false),          // No decoder read outstanding yet.
      received_end_of_stream_(false),
      rendered_end_of_stream_(false),
      bytes_per_frame_(0),           // Computed from decoder config later.
      bytes_per_second_(0),          // Computed from audio parameters later.
      stopped_(false),
      sink_(sink),
      is_initialized_(false),
      // Cache the decoder-read callback once.  base::Unretained is safe only
      // if this object outlives any outstanding read -- presumably guaranteed
      // by the Stop() sequence; TODO confirm against the pipeline contract.
      read_cb_(base::Bind(&AudioRendererBase::DecodedAudioReady,
                          base::Unretained(this))) {
}
27 | 31 |
// Destructor only sanity-checks shutdown; all real teardown happens in
// Stop().
AudioRendererBase::~AudioRendererBase() {
  // Stop() should have been called and |algorithm_| should have been destroyed.
  DCHECK(state_ == kUninitialized || state_ == kStopped);
  DCHECK(!algorithm_.get());
}
33 | 37 |
34 void AudioRendererBase::Play(const base::Closure& callback) { | 38 void AudioRendererBase::Play(const base::Closure& callback) { |
35 base::AutoLock auto_lock(lock_); | 39 { |
36 DCHECK_EQ(kPaused, state_); | 40 base::AutoLock auto_lock(lock_); |
37 state_ = kPlaying; | 41 DCHECK_EQ(kPaused, state_); |
38 callback.Run(); | 42 state_ = kPlaying; |
43 callback.Run(); | |
44 } | |
45 | |
46 if (stopped_) | |
47 return; | |
48 | |
49 if (GetPlaybackRate() != 0.0f) { | |
50 DoPlay(); | |
51 } else { | |
52 DoPause(); | |
53 } | |
54 } | |
55 | |
56 void AudioRendererBase::DoPlay() { | |
57 earliest_end_time_ = base::Time::Now(); | |
58 DCHECK(sink_.get()); | |
59 sink_->Play(); | |
39 } | 60 } |
40 | 61 |
41 void AudioRendererBase::Pause(const base::Closure& callback) { | 62 void AudioRendererBase::Pause(const base::Closure& callback) { |
42 base::AutoLock auto_lock(lock_); | 63 { |
43 DCHECK(state_ == kPlaying || state_ == kUnderflow || state_ == kRebuffering); | 64 base::AutoLock auto_lock(lock_); |
44 pause_cb_ = callback; | 65 DCHECK(state_ == kPlaying || state_ == kUnderflow || |
45 state_ = kPaused; | 66 state_ == kRebuffering); |
67 pause_cb_ = callback; | |
68 state_ = kPaused; | |
46 | 69 |
47 // Pause only when we've completed our pending read. | 70 // Pause only when we've completed our pending read. |
48 if (!pending_read_) { | 71 if (!pending_read_) { |
49 pause_cb_.Run(); | 72 pause_cb_.Run(); |
50 pause_cb_.Reset(); | 73 pause_cb_.Reset(); |
51 } else { | 74 } else { |
52 state_ = kPaused; | 75 state_ = kPaused; |
enal1
2012/04/03 16:12:20
We already set paused state in line 68.
vrk (LEFT CHROMIUM)
2012/04/03 18:27:52
Deleted this else branch.
| |
76 } | |
53 } | 77 } |
78 | |
79 if (stopped_) | |
80 return; | |
81 | |
82 DoPause(); | |
83 } | |
84 | |
// Pauses the sink.  The |false| argument requests a pause without flushing
// buffered data -- presumably so playback can resume exactly where it left
// off (contrast DoSeek(), which passes true to also flush).
void AudioRendererBase::DoPause() {
  DCHECK(sink_.get());
  sink_->Pause(false);
}
55 | 89 |
// Discards buffered/decoded audio by resetting the decoder.  |callback| is
// invoked by the decoder once the reset completes.
void AudioRendererBase::Flush(const base::Closure& callback) {
  decoder_->Reset(callback);
}
59 | 93 |
60 void AudioRendererBase::Stop(const base::Closure& callback) { | 94 void AudioRendererBase::Stop(const base::Closure& callback) { |
61 OnStop(); | 95 if (!stopped_) { |
96 DCHECK(sink_.get()); | |
97 sink_->Stop(); | |
98 | |
99 stopped_ = true; | |
100 } | |
62 { | 101 { |
63 base::AutoLock auto_lock(lock_); | 102 base::AutoLock auto_lock(lock_); |
64 state_ = kStopped; | 103 state_ = kStopped; |
65 algorithm_.reset(NULL); | 104 algorithm_.reset(NULL); |
66 time_cb_.Reset(); | 105 time_cb_.Reset(); |
67 underflow_cb_.Reset(); | 106 underflow_cb_.Reset(); |
68 } | 107 } |
69 if (!callback.is_null()) { | 108 if (!callback.is_null()) { |
70 callback.Run(); | 109 callback.Run(); |
71 } | 110 } |
72 } | 111 } |
73 | 112 |
// Begins a seek to |time|.  Must be called in kPaused with no read pending;
// |cb| is stored in |seek_cb_| and run elsewhere once enough data for the
// new position has been buffered.
void AudioRendererBase::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
  base::AutoLock auto_lock(lock_);
  DCHECK_EQ(kPaused, state_);
  DCHECK(!pending_read_) << "Pending read must complete before seeking";
  DCHECK(pause_cb_.is_null());
  DCHECK(seek_cb_.is_null());
  state_ = kSeeking;
  seek_cb_ = cb;
  seek_timestamp_ = time;

  // Throw away everything and schedule our reads.
  audio_time_buffered_ = base::TimeDelta();
  received_end_of_stream_ = false;
  rendered_end_of_stream_ = false;

  // |algorithm_| will request more reads.
  algorithm_->FlushBuffers();

  // After Stop() the sink must not be touched.
  if (stopped_)
    return;

  DoSeek();
}
136 | |
137 void AudioRendererBase::DoSeek() { | |
138 earliest_end_time_ = base::Time::Now(); | |
139 | |
140 // Pause and flush the stream when we seek to a new location. | |
141 sink_->Pause(true); | |
91 } | 142 } |
92 | 143 |
93 void AudioRendererBase::Initialize(const scoped_refptr<AudioDecoder>& decoder, | 144 void AudioRendererBase::Initialize(const scoped_refptr<AudioDecoder>& decoder, |
94 const PipelineStatusCB& init_cb, | 145 const PipelineStatusCB& init_cb, |
95 const base::Closure& underflow_cb, | 146 const base::Closure& underflow_cb, |
96 const TimeCB& time_cb) { | 147 const TimeCB& time_cb) { |
97 DCHECK(decoder); | 148 DCHECK(decoder); |
98 DCHECK(!init_cb.is_null()); | 149 DCHECK(!init_cb.is_null()); |
99 DCHECK(!underflow_cb.is_null()); | 150 DCHECK(!underflow_cb.is_null()); |
100 DCHECK(!time_cb.is_null()); | 151 DCHECK(!time_cb.is_null()); |
(...skipping 12 matching lines...) Expand all Loading... | |
113 // and a callback to request more reads from the data source. | 164 // and a callback to request more reads from the data source. |
114 ChannelLayout channel_layout = decoder_->channel_layout(); | 165 ChannelLayout channel_layout = decoder_->channel_layout(); |
115 int channels = ChannelLayoutToChannelCount(channel_layout); | 166 int channels = ChannelLayoutToChannelCount(channel_layout); |
116 int bits_per_channel = decoder_->bits_per_channel(); | 167 int bits_per_channel = decoder_->bits_per_channel(); |
117 int sample_rate = decoder_->samples_per_second(); | 168 int sample_rate = decoder_->samples_per_second(); |
118 // TODO(vrk): Add method to AudioDecoder to compute bytes per frame. | 169 // TODO(vrk): Add method to AudioDecoder to compute bytes per frame. |
119 bytes_per_frame_ = channels * bits_per_channel / 8; | 170 bytes_per_frame_ = channels * bits_per_channel / 8; |
120 | 171 |
121 bool config_ok = algorithm_->ValidateConfig(channels, sample_rate, | 172 bool config_ok = algorithm_->ValidateConfig(channels, sample_rate, |
122 bits_per_channel); | 173 bits_per_channel); |
123 if (config_ok) | 174 if (!config_ok || is_initialized_) { |
124 algorithm_->Initialize(channels, sample_rate, bits_per_channel, 0.0f, cb); | |
125 | |
126 // Give the subclass an opportunity to initialize itself. | |
127 if (!config_ok || !OnInitialize(bits_per_channel, channel_layout, | |
128 sample_rate)) { | |
129 init_cb.Run(PIPELINE_ERROR_INITIALIZATION_FAILED); | 175 init_cb.Run(PIPELINE_ERROR_INITIALIZATION_FAILED); |
130 return; | 176 return; |
131 } | 177 } |
132 | 178 |
179 if (config_ok) | |
180 algorithm_->Initialize(channels, sample_rate, bits_per_channel, 0.0f, cb); | |
181 | |
182 // We use the AUDIO_PCM_LINEAR flag because AUDIO_PCM_LOW_LATENCY | |
183 // does not currently support all the sample-rates that we require. | |
184 // Please see: http://code.google.com/p/chromium/issues/detail?id=103627 | |
185 // for more details. | |
186 audio_parameters_ = AudioParameters( | |
187 AudioParameters::AUDIO_PCM_LINEAR, channel_layout, sample_rate, | |
188 bits_per_channel, GetHighLatencyOutputBufferSize(sample_rate)); | |
189 | |
190 bytes_per_second_ = audio_parameters_.GetBytesPerSecond(); | |
191 | |
192 DCHECK(sink_.get()); | |
193 DCHECK(!is_initialized_); | |
194 | |
195 sink_->Initialize(audio_parameters_, this); | |
196 | |
197 sink_->Start(); | |
198 is_initialized_ = true; | |
199 | |
133 // Finally, execute the start callback. | 200 // Finally, execute the start callback. |
134 state_ = kPaused; | 201 state_ = kPaused; |
135 init_cb.Run(PIPELINE_OK); | 202 init_cb.Run(PIPELINE_OK); |
136 } | 203 } |
137 | 204 |
138 bool AudioRendererBase::HasEnded() { | 205 bool AudioRendererBase::HasEnded() { |
139 base::AutoLock auto_lock(lock_); | 206 base::AutoLock auto_lock(lock_); |
140 DCHECK(!rendered_end_of_stream_ || algorithm_->NeedsMoreData()); | 207 DCHECK(!rendered_end_of_stream_ || algorithm_->NeedsMoreData()); |
141 | 208 |
142 return received_end_of_stream_ && rendered_end_of_stream_; | 209 return received_end_of_stream_ && rendered_end_of_stream_; |
143 } | 210 } |
144 | 211 |
145 void AudioRendererBase::ResumeAfterUnderflow(bool buffer_more_audio) { | 212 void AudioRendererBase::ResumeAfterUnderflow(bool buffer_more_audio) { |
146 base::AutoLock auto_lock(lock_); | 213 base::AutoLock auto_lock(lock_); |
147 if (state_ == kUnderflow) { | 214 if (state_ == kUnderflow) { |
148 if (buffer_more_audio) | 215 if (buffer_more_audio) |
149 algorithm_->IncreaseQueueCapacity(); | 216 algorithm_->IncreaseQueueCapacity(); |
150 | 217 |
151 state_ = kRebuffering; | 218 state_ = kRebuffering; |
152 } | 219 } |
153 } | 220 } |
154 | 221 |
222 void AudioRendererBase::SetVolume(float volume) { | |
223 if (stopped_) | |
224 return; | |
225 sink_->SetVolume(volume); | |
226 } | |
227 | |
155 void AudioRendererBase::DecodedAudioReady(scoped_refptr<Buffer> buffer) { | 228 void AudioRendererBase::DecodedAudioReady(scoped_refptr<Buffer> buffer) { |
156 base::AutoLock auto_lock(lock_); | 229 base::AutoLock auto_lock(lock_); |
157 DCHECK(state_ == kPaused || state_ == kSeeking || state_ == kPlaying || | 230 DCHECK(state_ == kPaused || state_ == kSeeking || state_ == kPlaying || |
158 state_ == kUnderflow || state_ == kRebuffering || state_ == kStopped); | 231 state_ == kUnderflow || state_ == kRebuffering || state_ == kStopped); |
159 | 232 |
160 CHECK(pending_read_); | 233 CHECK(pending_read_); |
161 pending_read_ = false; | 234 pending_read_ = false; |
162 | 235 |
163 if (buffer && buffer->IsEndOfStream()) { | 236 if (buffer && buffer->IsEndOfStream()) { |
164 received_end_of_stream_ = true; | 237 received_end_of_stream_ = true; |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
199 algorithm_->EnqueueBuffer(buffer); | 272 algorithm_->EnqueueBuffer(buffer); |
200 return; | 273 return; |
201 case kStopped: | 274 case kStopped: |
202 return; | 275 return; |
203 } | 276 } |
204 } | 277 } |
205 | 278 |
// Fills |dest| with up to |requested_frames| frames of decoded audio and
// returns the number of frames written.  |playback_delay| is the amount of
// audio already queued at the device.  Also advances the reported media
// time and detects underflow / end of stream.
// NOTE(review): |audio_time_buffered_| and |earliest_end_time_| are read
// and written outside |lock_| -- assumes FillBuffer() is only ever invoked
// from a single render thread; TODO confirm.
uint32 AudioRendererBase::FillBuffer(uint8* dest,
                                     uint32 requested_frames,
                                     const base::TimeDelta& playback_delay) {
  // The |audio_time_buffered_| is the ending timestamp of the last frame
  // buffered at the audio device. |playback_delay| is the amount of time
  // buffered at the audio device. The current time can be computed by their
  // difference.
  base::TimeDelta current_time = audio_time_buffered_ - playback_delay;

  size_t frames_written = 0;
  base::Closure underflow_cb;
  {
    base::AutoLock auto_lock(lock_);

    // A full queue means rebuffering is complete; resume normal playback.
    if (state_ == kRebuffering && algorithm_->IsQueueFull())
      state_ = kPlaying;

    // Mute audio by returning 0 when not playing.
    if (state_ != kPlaying) {
      // TODO(scherkus): To keep the audio hardware busy we write at most 8k of
      // zeros. This gets around the tricky situation of pausing and resuming
      // the audio IPC layer in Chrome. Ideally, we should return zero and then
      // the subclass can restart the conversation.
      //
      // This should get handled by the subclass http://crbug.com/106600
      const uint32 kZeroLength = 8192;
      size_t zeros_to_write =
          std::min(kZeroLength, requested_frames * bytes_per_frame_);
      memset(dest, 0, zeros_to_write);
      return zeros_to_write / bytes_per_frame_;
    }

    // Use three conditions to determine the end of playback:
    // 1. Algorithm needs more audio data.
    // 2. We've received an end of stream buffer.
    //    (received_end_of_stream_ == true)
    // 3. Browser process has no audio data being played.
    //    There is no way to check that condition that would work for all
    //    derived classes, so call virtual method that would either render
    //    end of stream or schedule such rendering.
    //
    // Three conditions determine when an underflow occurs:
    // 1. Algorithm has no audio data.
    // 2. Currently in the kPlaying state.
    // 3. Have not received an end of stream buffer.
    if (algorithm_->NeedsMoreData()) {
      if (received_end_of_stream_) {
        // TODO(enal): schedule callback instead of polling.
        // Only signal once wall-clock time has passed |earliest_end_time_|,
        // i.e. the device has had time to actually play everything written.
        if (base::Time::Now() >= earliest_end_time_)
          SignalEndOfStream();
      } else if (state_ == kPlaying) {
        // Capture the callback so it can run after |lock_| is released.
        state_ = kUnderflow;
        underflow_cb = underflow_cb_;
      }
    } else {
      // Otherwise fill the buffer.
      frames_written = algorithm_->FillBuffer(dest, requested_frames);
    }
  }

  base::TimeDelta previous_time_buffered = audio_time_buffered_;
  // The call to FillBuffer() on |algorithm_| has increased the amount of
  // buffered audio data. Update the new amount of time buffered.
  audio_time_buffered_ = algorithm_->GetTime();

  // Report time only once real data has been buffered, and only when the
  // buffered amount changed or the clock would move forward.
  if (previous_time_buffered.InMicroseconds() > 0 &&
      (previous_time_buffered != audio_time_buffered_ ||
       current_time > host()->GetTime())) {
    time_cb_.Run(current_time, audio_time_buffered_);
  }

  // Run the underflow callback outside of |lock_| to avoid re-entrancy.
  if (!underflow_cb.is_null())
    underflow_cb.Run();

  return frames_written;
}
281 | 355 |
282 void AudioRendererBase::SignalEndOfStream() { | 356 void AudioRendererBase::SignalEndOfStream() { |
283 DCHECK(received_end_of_stream_); | 357 DCHECK(received_end_of_stream_); |
284 if (!rendered_end_of_stream_) { | 358 if (!rendered_end_of_stream_) { |
285 rendered_end_of_stream_ = true; | 359 rendered_end_of_stream_ = true; |
286 host()->NotifyEnded(); | 360 host()->NotifyEnded(); |
287 } | 361 } |
288 } | 362 } |
289 | 363 |
290 void AudioRendererBase::ScheduleRead_Locked() { | 364 void AudioRendererBase::ScheduleRead_Locked() { |
291 lock_.AssertAcquired(); | 365 lock_.AssertAcquired(); |
292 if (pending_read_ || state_ == kPaused) | 366 if (pending_read_ || state_ == kPaused) |
293 return; | 367 return; |
294 pending_read_ = true; | 368 pending_read_ = true; |
295 decoder_->Read(read_cb_); | 369 decoder_->Read(read_cb_); |
296 } | 370 } |
297 | 371 |
298 void AudioRendererBase::SetPlaybackRate(float playback_rate) { | 372 void AudioRendererBase::SetPlaybackRate(float playback_rate) { |
373 DCHECK_LE(0.0f, playback_rate); | |
374 | |
375 if (!stopped_) { | |
376 // Notify sink of new playback rate. | |
377 sink_->SetPlaybackRate(playback_rate); | |
378 | |
379 // We have two cases here: | |
380 // Play: GetPlaybackRate() == 0.0 && playback_rate != 0.0 | |
381 // Pause: GetPlaybackRate() != 0.0 && playback_rate == 0.0 | |
382 if (GetPlaybackRate() == 0.0f && playback_rate != 0.0f) { | |
383 DoPlay(); | |
384 } else if (GetPlaybackRate() != 0.0f && playback_rate == 0.0f) { | |
385 // Pause is easy, we can always pause. | |
386 DoPause(); | |
387 } | |
388 } | |
389 | |
299 base::AutoLock auto_lock(lock_); | 390 base::AutoLock auto_lock(lock_); |
300 algorithm_->SetPlaybackRate(playback_rate); | 391 algorithm_->SetPlaybackRate(playback_rate); |
301 } | 392 } |
302 | 393 |
303 float AudioRendererBase::GetPlaybackRate() { | 394 float AudioRendererBase::GetPlaybackRate() { |
304 base::AutoLock auto_lock(lock_); | 395 base::AutoLock auto_lock(lock_); |
305 return algorithm_->playback_rate(); | 396 return algorithm_->playback_rate(); |
306 } | 397 } |
307 | 398 |
308 bool AudioRendererBase::IsBeforeSeekTime(const scoped_refptr<Buffer>& buffer) { | 399 bool AudioRendererBase::IsBeforeSeekTime(const scoped_refptr<Buffer>& buffer) { |
309 return (state_ == kSeeking) && buffer && !buffer->IsEndOfStream() && | 400 return (state_ == kSeeking) && buffer && !buffer->IsEndOfStream() && |
310 (buffer->GetTimestamp() + buffer->GetDuration()) < seek_timestamp_; | 401 (buffer->GetTimestamp() + buffer->GetDuration()) < seek_timestamp_; |
311 } | 402 } |
312 | 403 |
404 int AudioRendererBase::Render(const std::vector<float*>& audio_data, | |
405 int number_of_frames, | |
406 int audio_delay_milliseconds) { | |
407 if (stopped_ || GetPlaybackRate() == 0.0f) { | |
408 // Output silence if stopped. | |
409 for (size_t i = 0; i < audio_data.size(); ++i) | |
410 memset(audio_data[i], 0, sizeof(float) * number_of_frames); | |
411 return 0; | |
412 } | |
413 | |
414 // Adjust the playback delay. | |
415 base::TimeDelta request_delay = | |
416 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds); | |
417 | |
418 // Finally we need to adjust the delay according to playback rate. | |
419 if (GetPlaybackRate() != 1.0f) { | |
420 request_delay = base::TimeDelta::FromMicroseconds( | |
421 static_cast<int64>(ceil(request_delay.InMicroseconds() * | |
422 GetPlaybackRate()))); | |
423 } | |
424 | |
425 int bytes_per_frame = audio_parameters_.GetBytesPerFrame(); | |
426 | |
427 const int buf_size = number_of_frames * bytes_per_frame; | |
428 scoped_array<uint8> buf(new uint8[buf_size]); | |
429 | |
430 int frames_filled = FillBuffer(buf.get(), number_of_frames, request_delay); | |
431 int bytes_filled = frames_filled * bytes_per_frame; | |
432 DCHECK_LE(bytes_filled, buf_size); | |
433 UpdateEarliestEndTime(bytes_filled, request_delay, base::Time::Now()); | |
434 | |
435 // Deinterleave each audio channel. | |
436 int channels = audio_data.size(); | |
437 for (int channel_index = 0; channel_index < channels; ++channel_index) { | |
438 media::DeinterleaveAudioChannel(buf.get(), | |
439 audio_data[channel_index], | |
440 channels, | |
441 channel_index, | |
442 bytes_per_frame / channels, | |
443 frames_filled); | |
444 | |
445 // If FillBuffer() didn't give us enough data then zero out the remainder. | |
446 if (frames_filled < number_of_frames) { | |
447 int frames_to_zero = number_of_frames - frames_filled; | |
448 memset(audio_data[channel_index] + frames_filled, | |
449 0, | |
450 sizeof(float) * frames_to_zero); | |
451 } | |
452 } | |
453 return frames_filled; | |
454 } | |
455 | |
456 void AudioRendererBase::UpdateEarliestEndTime(int bytes_filled, | |
457 base::TimeDelta request_delay, | |
458 base::Time time_now) { | |
459 if (bytes_filled != 0) { | |
460 base::TimeDelta predicted_play_time = ConvertToDuration(bytes_filled); | |
461 float playback_rate = GetPlaybackRate(); | |
462 if (playback_rate != 1.0f) { | |
463 predicted_play_time = base::TimeDelta::FromMicroseconds( | |
464 static_cast<int64>(ceil(predicted_play_time.InMicroseconds() * | |
465 playback_rate))); | |
466 } | |
467 earliest_end_time_ = | |
468 std::max(earliest_end_time_, | |
469 time_now + request_delay + predicted_play_time); | |
470 } | |
471 } | |
472 | |
473 base::TimeDelta AudioRendererBase::ConvertToDuration(int bytes) { | |
474 if (bytes_per_second_) { | |
475 return base::TimeDelta::FromMicroseconds( | |
476 base::Time::kMicrosecondsPerSecond * bytes / bytes_per_second_); | |
477 } | |
478 return base::TimeDelta(); | |
479 } | |
480 | |
// Called by the sink when an unrecoverable rendering error occurs; disables
// audio for the pipeline so playback can continue without sound.
void AudioRendererBase::OnRenderError() {
  host()->DisableAudioRenderer();
}
484 | |
313 } // namespace media | 485 } // namespace media |
OLD | NEW |