OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/cast/audio_receiver/audio_receiver.h" | 5 #include "media/cast/audio_receiver/audio_receiver.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/logging.h" | 8 #include "base/logging.h" |
9 #include "base/message_loop/message_loop.h" | 9 #include "base/message_loop/message_loop.h" |
10 #include "crypto/encryptor.h" | 10 #include "crypto/encryptor.h" |
(...skipping 264 matching lines...)
275 | 275 |
276 void AudioReceiver::PlayoutTimeout() { | 276 void AudioReceiver::PlayoutTimeout() { |
277 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 277 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
278 DCHECK(audio_buffer_) << "Invalid function call in this configuration"; | 278 DCHECK(audio_buffer_) << "Invalid function call in this configuration"; |
279 if (queued_encoded_callbacks_.empty()) { | 279 if (queued_encoded_callbacks_.empty()) { |
280 // Already released by incoming packet. | 280 // Already released by incoming packet. |
281 return; | 281 return; |
282 } | 282 } |
283 uint32 rtp_timestamp = 0; | 283 uint32 rtp_timestamp = 0; |
284 bool next_frame = false; | 284 bool next_frame = false; |
285 scoped_ptr<EncodedAudioFrame> encoded_frame(new EncodedAudioFrame()); | 285 scoped_ptr<transport::EncodedAudioFrame> encoded_frame( |
| 286 new transport::EncodedAudioFrame()); |
286 | 287 |
287 if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(), | 288 if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(), |
288 &rtp_timestamp, &next_frame)) { | 289 &rtp_timestamp, &next_frame)) { |
289 // We have no audio frames. Wait for new packet(s). | 290 // We have no audio frames. Wait for new packet(s). |
290 // Since the application can post multiple AudioFrameEncodedCallbacks and | 291 // Since the application can post multiple AudioFrameEncodedCallbacks and |
291 // we only check the next frame to play out, we might have multiple timeout | 292 // we only check the next frame to play out, we might have multiple timeout |
292 // events firing after each other; however, this should be a rare event. | 293 // events firing after each other; however, this should be a rare event. |
293 VLOG(1) << "Failed to retrieved a complete frame at this point in time"; | 294 VLOG(1) << "Failed to retrieved a complete frame at this point in time"; |
294 return; | 295 return; |
295 } | 296 } |
(...skipping 10 matching lines...)
306 } | 307 } |
307 } | 308 } |
308 | 309 |
309 void AudioReceiver::GetEncodedAudioFrame( | 310 void AudioReceiver::GetEncodedAudioFrame( |
310 const AudioFrameEncodedCallback& callback) { | 311 const AudioFrameEncodedCallback& callback) { |
311 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 312 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
312 DCHECK(audio_buffer_) << "Invalid function call in this configuration"; | 313 DCHECK(audio_buffer_) << "Invalid function call in this configuration"; |
313 | 314 |
314 uint32 rtp_timestamp = 0; | 315 uint32 rtp_timestamp = 0; |
315 bool next_frame = false; | 316 bool next_frame = false; |
316 scoped_ptr<EncodedAudioFrame> encoded_frame(new EncodedAudioFrame()); | 317 scoped_ptr<transport::EncodedAudioFrame> encoded_frame( |
| 318 new transport::EncodedAudioFrame()); |
317 | 319 |
318 if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(), | 320 if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(), |
319 &rtp_timestamp, &next_frame)) { | 321 &rtp_timestamp, &next_frame)) { |
320 // We have no audio frames. Wait for new packet(s). | 322 // We have no audio frames. Wait for new packet(s). |
321 VLOG(1) << "Wait for more audio packets in frame"; | 323 VLOG(1) << "Wait for more audio packets in frame"; |
322 queued_encoded_callbacks_.push_back(callback); | 324 queued_encoded_callbacks_.push_back(callback); |
323 return; | 325 return; |
324 } | 326 } |
325 if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) { | 327 if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) { |
326 // Logging already done. | 328 // Logging already done. |
327 queued_encoded_callbacks_.push_back(callback); | 329 queued_encoded_callbacks_.push_back(callback); |
328 return; | 330 return; |
329 } | 331 } |
330 if (!PostEncodedAudioFrame(callback, rtp_timestamp, next_frame, | 332 if (!PostEncodedAudioFrame(callback, rtp_timestamp, next_frame, |
331 &encoded_frame)) { | 333 &encoded_frame)) { |
332 // We have an audio frame; however, we are missing packets and we have time | 334 // We have an audio frame; however, we are missing packets and we have time |
333 // to wait for new packet(s). | 335 // to wait for new packet(s). |
334 queued_encoded_callbacks_.push_back(callback); | 336 queued_encoded_callbacks_.push_back(callback); |
335 } | 337 } |
336 } | 338 } |
337 | 339 |
338 bool AudioReceiver::PostEncodedAudioFrame( | 340 bool AudioReceiver::PostEncodedAudioFrame( |
339 const AudioFrameEncodedCallback& callback, | 341 const AudioFrameEncodedCallback& callback, |
340 uint32 rtp_timestamp, | 342 uint32 rtp_timestamp, |
341 bool next_frame, | 343 bool next_frame, |
342 scoped_ptr<EncodedAudioFrame>* encoded_frame) { | 344 scoped_ptr<transport::EncodedAudioFrame>* encoded_frame) { |
343 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 345 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
344 DCHECK(audio_buffer_) << "Invalid function call in this configuration"; | 346 DCHECK(audio_buffer_) << "Invalid function call in this configuration"; |
345 | 347 |
346 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); | 348 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
347 base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp); | 349 base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp); |
348 base::TimeDelta time_until_playout = playout_time - now; | 350 base::TimeDelta time_until_playout = playout_time - now; |
349 base::TimeDelta min_wait_delta = | 351 base::TimeDelta min_wait_delta = |
350 base::TimeDelta::FromMilliseconds(kMaxAudioFrameWaitMs); | 352 base::TimeDelta::FromMilliseconds(kMaxAudioFrameWaitMs); |
351 | 353 |
352 if (!next_frame && (time_until_playout > min_wait_delta)) { | 354 if (!next_frame && (time_until_playout > min_wait_delta)) { |
(...skipping 63 matching lines...)
416 rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_ : now; | 418 rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_ : now; |
417 } | 419 } |
418 // Don't allow the playout time to go backwards. | 420 // Don't allow the playout time to go backwards. |
419 if (last_playout_time_ > playout_time) | 421 if (last_playout_time_ > playout_time) |
420 playout_time = last_playout_time_; | 422 playout_time = last_playout_time_; |
421 last_playout_time_ = playout_time; | 423 last_playout_time_ = playout_time; |
422 return playout_time; | 424 return playout_time; |
423 } | 425 } |
424 | 426 |
425 bool AudioReceiver::DecryptAudioFrame( | 427 bool AudioReceiver::DecryptAudioFrame( |
426 scoped_ptr<EncodedAudioFrame>* audio_frame) { | 428 scoped_ptr<transport::EncodedAudioFrame>* audio_frame) { |
427 DCHECK(decryptor_) << "Invalid state"; | 429 DCHECK(decryptor_) << "Invalid state"; |
428 | 430 |
429 if (!decryptor_->SetCounter(GetAesNonce((*audio_frame)->frame_id, | 431 if (!decryptor_->SetCounter(GetAesNonce((*audio_frame)->frame_id, |
430 iv_mask_))) { | 432 iv_mask_))) { |
431 NOTREACHED() << "Failed to set counter"; | 433 NOTREACHED() << "Failed to set counter"; |
432 return false; | 434 return false; |
433 } | 435 } |
434 std::string decrypted_audio_data; | 436 std::string decrypted_audio_data; |
435 if (!decryptor_->Decrypt((*audio_frame)->data, &decrypted_audio_data)) { | 437 if (!decryptor_->Decrypt((*audio_frame)->data, &decrypted_audio_data)) { |
436 VLOG(0) << "Decryption error"; | 438 VLOG(0) << "Decryption error"; |
(...skipping 55 matching lines...)
492 } | 494 } |
493 if (audio_decoder_) { | 495 if (audio_decoder_) { |
494 // Will only send a message if it is time. | 496 // Will only send a message if it is time. |
495 audio_decoder_->SendCastMessage(); | 497 audio_decoder_->SendCastMessage(); |
496 } | 498 } |
497 ScheduleNextCastMessage(); | 499 ScheduleNextCastMessage(); |
498 } | 500 } |
499 | 501 |
500 } // namespace cast | 502 } // namespace cast |
501 } // namespace media | 503 } // namespace media |