Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/cast/audio_receiver/audio_receiver.h" | 5 #include "media/cast/audio_receiver/audio_receiver.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "base/logging.h" | 8 #include "base/logging.h" |
| 9 #include "base/message_loop/message_loop.h" | 9 #include "base/message_loop/message_loop.h" |
| 10 #include "crypto/encryptor.h" | 10 #include "crypto/encryptor.h" |
| 11 #include "crypto/symmetric_key.h" | 11 #include "crypto/symmetric_key.h" |
| 12 #include "media/cast/audio_receiver/audio_decoder.h" | 12 #include "media/cast/audio_receiver/audio_decoder.h" |
| 13 #include "media/cast/framer/framer.h" | 13 #include "media/cast/framer/framer.h" |
| 14 #include "media/cast/rtcp/rtcp.h" | 14 #include "media/cast/rtcp/rtcp.h" |
| 15 #include "media/cast/rtp_receiver/rtp_receiver.h" | 15 #include "media/cast/rtp_receiver/rtp_receiver.h" |
| 16 | 16 |
| 17 // Max time we wait before an audio frame that is due to be played out is released. | 17 // Max time we wait before an audio frame that is due to be played out is released. |
| 18 static const int64 kMaxAudioFrameWaitMs = 20; | 18 static const int64 kMaxAudioFrameWaitMs = 20; |
| 19 static const int64 kMinSchedulingDelayMs = 1; | 19 static const int64 kMinSchedulingDelayMs = 1; |
| 20 | 20 |
| 21 namespace media { | 21 namespace media { |
| 22 namespace cast { | 22 namespace cast { |
| 23 | 23 |
| 24 DecodedAudioCallbackData::DecodedAudioCallbackData() | 24 DecodedAudioCallbackData::DecodedAudioCallbackData() |
| 25 : number_of_10ms_blocks(0), | 25 : number_of_10ms_blocks(0), desired_frequency(0), callback() {} |

hubbe (2014/01/29 20:07:39): Same here; I think it was easier to read before.
mikhal1 (2014/01/29 21:02:38): Same reply.

| 26 desired_frequency(0), | |
| 27 callback() {} | |
| 28 | 26 |
| 29 DecodedAudioCallbackData::~DecodedAudioCallbackData() {} | 27 DecodedAudioCallbackData::~DecodedAudioCallbackData() {} |
| 30 | 28 |
| 31 // Local implementation of RtpData (defined in rtp_rtcp_defines.h). | 29 // Local implementation of RtpData (defined in rtp_rtcp_defines.h). |
| 32 // Used to pass payload data into the audio receiver. | 30 // Used to pass payload data into the audio receiver. |
| 33 class LocalRtpAudioData : public RtpData { | 31 class LocalRtpAudioData : public RtpData { |
| 34 public: | 32 public: |
| 35 explicit LocalRtpAudioData(AudioReceiver* audio_receiver) | 33 explicit LocalRtpAudioData(AudioReceiver* audio_receiver) |
| 36 : audio_receiver_(audio_receiver) {} | 34 : audio_receiver_(audio_receiver) {} |
| 37 | 35 |
| 38 virtual void OnReceivedPayloadData( | 36 virtual void OnReceivedPayloadData(const uint8* payload_data, |
| 39 const uint8* payload_data, | 37 size_t payload_size, |
| 40 size_t payload_size, | 38 const RtpCastHeader* rtp_header) OVERRIDE { |
| 41 const RtpCastHeader* rtp_header) OVERRIDE { | 39 audio_receiver_->IncomingParsedRtpPacket( |
| 42 audio_receiver_->IncomingParsedRtpPacket(payload_data, payload_size, | 40 payload_data, payload_size, *rtp_header); |
| 43 *rtp_header); | |
| 44 } | 41 } |
| 45 | 42 |
| 46 private: | 43 private: |
| 47 AudioReceiver* audio_receiver_; | 44 AudioReceiver* audio_receiver_; |
| 48 }; | 45 }; |
| 49 | 46 |
| 50 // Local implementation of RtpPayloadFeedback (defined in rtp_defines.h) | 47 // Local implementation of RtpPayloadFeedback (defined in rtp_defines.h) |
| 51 // Used to convey cast-specific feedback from receiver to sender. | 48 // Used to convey cast-specific feedback from receiver to sender. |
| 52 class LocalRtpAudioFeedback : public RtpPayloadFeedback { | 49 class LocalRtpAudioFeedback : public RtpPayloadFeedback { |
| 53 public: | 50 public: |
| 54 explicit LocalRtpAudioFeedback(AudioReceiver* audio_receiver) | 51 explicit LocalRtpAudioFeedback(AudioReceiver* audio_receiver) |
| 55 : audio_receiver_(audio_receiver) { | 52 : audio_receiver_(audio_receiver) {} |
| 56 } | |
| 57 | 53 |
| 58 virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE { | 54 virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE { |
| 59 audio_receiver_->CastFeedback(cast_message); | 55 audio_receiver_->CastFeedback(cast_message); |
| 60 } | 56 } |
| 61 | 57 |
| 62 private: | 58 private: |
| 63 AudioReceiver* audio_receiver_; | 59 AudioReceiver* audio_receiver_; |
| 64 }; | 60 }; |
| 65 | 61 |
| 66 class LocalRtpReceiverStatistics : public RtpReceiverStatistics { | 62 class LocalRtpReceiverStatistics : public RtpReceiverStatistics { |
| 67 public: | 63 public: |
| 68 explicit LocalRtpReceiverStatistics(RtpReceiver* rtp_receiver) | 64 explicit LocalRtpReceiverStatistics(RtpReceiver* rtp_receiver) |
| 69 : rtp_receiver_(rtp_receiver) { | 65 : rtp_receiver_(rtp_receiver) {} |
| 70 } | |
| 71 | 66 |
| 72 virtual void GetStatistics(uint8* fraction_lost, | 67 virtual void GetStatistics(uint8* fraction_lost, |
| 73 uint32* cumulative_lost, // 24 bits valid. | 68 uint32* cumulative_lost, // 24 bits valid. |
| 74 uint32* extended_high_sequence_number, | 69 uint32* extended_high_sequence_number, |
| 75 uint32* jitter) OVERRIDE { | 70 uint32* jitter) OVERRIDE { |
| 76 rtp_receiver_->GetStatistics(fraction_lost, | 71 rtp_receiver_->GetStatistics( |
| 77 cumulative_lost, | 72 fraction_lost, cumulative_lost, extended_high_sequence_number, jitter); |
| 78 extended_high_sequence_number, | |
| 79 jitter); | |
| 80 } | 73 } |
| 81 | 74 |
| 82 private: | 75 private: |
| 83 RtpReceiver* rtp_receiver_; | 76 RtpReceiver* rtp_receiver_; |
| 84 }; | 77 }; |
| 85 | 78 |
| 86 AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment, | 79 AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment, |
| 87 const AudioReceiverConfig& audio_config, | 80 const AudioReceiverConfig& audio_config, |
| 88 transport::PacedPacketSender* const packet_sender) | 81 transport::PacedPacketSender* const packet_sender) |
| 89 : cast_environment_(cast_environment), | 82 : cast_environment_(cast_environment), |
| 90 codec_(audio_config.codec), | 83 codec_(audio_config.codec), |
| 91 frequency_(audio_config.frequency), | 84 frequency_(audio_config.frequency), |
| 92 audio_buffer_(), | 85 audio_buffer_(), |
| 93 audio_decoder_(), | 86 audio_decoder_(), |
| 94 time_offset_(), | 87 time_offset_(), |
| 95 weak_factory_(this) { | 88 weak_factory_(this) { |
| 96 target_delay_delta_ = | 89 target_delay_delta_ = |
| 97 base::TimeDelta::FromMilliseconds(audio_config.rtp_max_delay_ms); | 90 base::TimeDelta::FromMilliseconds(audio_config.rtp_max_delay_ms); |
| 98 incoming_payload_callback_.reset(new LocalRtpAudioData(this)); | 91 incoming_payload_callback_.reset(new LocalRtpAudioData(this)); |
| 99 incoming_payload_feedback_.reset(new LocalRtpAudioFeedback(this)); | 92 incoming_payload_feedback_.reset(new LocalRtpAudioFeedback(this)); |
| 100 if (audio_config.use_external_decoder) { | 93 if (audio_config.use_external_decoder) { |
| 101 audio_buffer_.reset(new Framer(cast_environment->Clock(), | 94 audio_buffer_.reset(new Framer(cast_environment->Clock(), |
| 102 incoming_payload_feedback_.get(), | 95 incoming_payload_feedback_.get(), |
| 103 audio_config.incoming_ssrc, | 96 audio_config.incoming_ssrc, |
| 104 true, | 97 true, |
| 105 0)); | 98 0)); |
| 106 } else { | 99 } else { |
| 107 audio_decoder_.reset(new AudioDecoder(cast_environment, | 100 audio_decoder_.reset(new AudioDecoder( |
| 108 audio_config, | 101 cast_environment, audio_config, incoming_payload_feedback_.get())); |
| 109 incoming_payload_feedback_.get())); | |
| 110 } | 102 } |
| 111 if (audio_config.aes_iv_mask.size() == kAesKeySize && | 103 if (audio_config.aes_iv_mask.size() == kAesKeySize && |
| 112 audio_config.aes_key.size() == kAesKeySize) { | 104 audio_config.aes_key.size() == kAesKeySize) { |
| 113 iv_mask_ = audio_config.aes_iv_mask; | 105 iv_mask_ = audio_config.aes_iv_mask; |
| 114 decryption_key_.reset(crypto::SymmetricKey::Import( | 106 decryption_key_.reset(crypto::SymmetricKey::Import( |
| 115 crypto::SymmetricKey::AES, audio_config.aes_key)); | 107 crypto::SymmetricKey::AES, audio_config.aes_key)); |
| 116 decryptor_.reset(new crypto::Encryptor()); | 108 decryptor_.reset(new crypto::Encryptor()); |
| 117 decryptor_->Init(decryption_key_.get(), | 109 decryptor_->Init( |
| 118 crypto::Encryptor::CTR, | 110 decryption_key_.get(), crypto::Encryptor::CTR, std::string()); |
| 119 std::string()); | |
| 120 } else if (audio_config.aes_iv_mask.size() != 0 || | 111 } else if (audio_config.aes_iv_mask.size() != 0 || |
| 121 audio_config.aes_key.size() != 0) { | 112 audio_config.aes_key.size() != 0) { |
| 122 DCHECK(false) << "Invalid crypto configuration"; | 113 DCHECK(false) << "Invalid crypto configuration"; |
| 123 } | 114 } |
| 124 | 115 |
| 125 rtp_receiver_.reset(new RtpReceiver(cast_environment->Clock(), | 116 rtp_receiver_.reset(new RtpReceiver(cast_environment->Clock(), |
| 126 &audio_config, | 117 &audio_config, |
| 127 NULL, | 118 NULL, |
| 128 incoming_payload_callback_.get())); | 119 incoming_payload_callback_.get())); |
| 129 rtp_audio_receiver_statistics_.reset( | 120 rtp_audio_receiver_statistics_.reset( |
| (...skipping 20 matching lines...) | |
| 150 ScheduleNextRtcpReport(); | 141 ScheduleNextRtcpReport(); |
| 151 ScheduleNextCastMessage(); | 142 ScheduleNextCastMessage(); |
| 152 } | 143 } |
| 153 | 144 |
| 154 void AudioReceiver::IncomingParsedRtpPacket(const uint8* payload_data, | 145 void AudioReceiver::IncomingParsedRtpPacket(const uint8* payload_data, |
| 155 size_t payload_size, | 146 size_t payload_size, |
| 156 const RtpCastHeader& rtp_header) { | 147 const RtpCastHeader& rtp_header) { |
| 157 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 148 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 158 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); | 149 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
| 159 | 150 |
| 160 cast_environment_->Logging()->InsertPacketEvent(now, kAudioPacketReceived, | 151 cast_environment_->Logging()->InsertPacketEvent( |
| 161 rtp_header.webrtc.header.timestamp, rtp_header.frame_id, | 152 now, |
| 162 rtp_header.packet_id, rtp_header.max_packet_id, payload_size); | 153 kAudioPacketReceived, |
| 154 rtp_header.webrtc.header.timestamp, | |
| 155 rtp_header.frame_id, | |
| 156 rtp_header.packet_id, | |
| 157 rtp_header.max_packet_id, | |
| 158 payload_size); | |
| 163 | 159 |
| 164 // TODO(pwestin): update this as video to refresh over time. | 160 // TODO(pwestin): update this as video to refresh over time. |
| 165 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 161 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 166 if (time_first_incoming_packet_.is_null()) { | 162 if (time_first_incoming_packet_.is_null()) { |
| 167 InitializeTimers(); | 163 InitializeTimers(); |
| 168 first_incoming_rtp_timestamp_ = rtp_header.webrtc.header.timestamp; | 164 first_incoming_rtp_timestamp_ = rtp_header.webrtc.header.timestamp; |
| 169 time_first_incoming_packet_ = now; | 165 time_first_incoming_packet_ = now; |
| 170 } | 166 } |
| 171 | 167 |
| 172 if (audio_decoder_) { | 168 if (audio_decoder_) { |
| 173 DCHECK(!audio_buffer_) << "Invalid internal state"; | 169 DCHECK(!audio_buffer_) << "Invalid internal state"; |
| 174 std::string plaintext(reinterpret_cast<const char*>(payload_data), | 170 std::string plaintext(reinterpret_cast<const char*>(payload_data), |
| 175 payload_size); | 171 payload_size); |
| 176 if (decryptor_) { | 172 if (decryptor_) { |
| 177 plaintext.clear(); | 173 plaintext.clear(); |
| 178 if (!decryptor_->SetCounter(GetAesNonce(rtp_header.frame_id, iv_mask_))) { | 174 if (!decryptor_->SetCounter(GetAesNonce(rtp_header.frame_id, iv_mask_))) { |
| 179 NOTREACHED() << "Failed to set counter"; | 175 NOTREACHED() << "Failed to set counter"; |
| 180 return; | 176 return; |
| 181 } | 177 } |
| 182 if (!decryptor_->Decrypt(base::StringPiece(reinterpret_cast<const char*>( | 178 if (!decryptor_->Decrypt( |
| 183 payload_data), payload_size), &plaintext)) { | 179 base::StringPiece(reinterpret_cast<const char*>(payload_data), |
| 180 payload_size), | |
| 181 &plaintext)) { | |
| 184 VLOG(1) << "Decryption error"; | 182 VLOG(1) << "Decryption error"; |
| 185 return; | 183 return; |
| 186 } | 184 } |
| 187 } | 185 } |
| 188 audio_decoder_->IncomingParsedRtpPacket( | 186 audio_decoder_->IncomingParsedRtpPacket( |
| 189 reinterpret_cast<const uint8*>(plaintext.data()), plaintext.size(), | 187 reinterpret_cast<const uint8*>(plaintext.data()), |
| 188 plaintext.size(), | |
| 190 rtp_header); | 189 rtp_header); |
| 191 if (!queued_decoded_callbacks_.empty()) { | 190 if (!queued_decoded_callbacks_.empty()) { |
| 192 DecodedAudioCallbackData decoded_data = queued_decoded_callbacks_.front(); | 191 DecodedAudioCallbackData decoded_data = queued_decoded_callbacks_.front(); |
| 193 queued_decoded_callbacks_.pop_front(); | 192 queued_decoded_callbacks_.pop_front(); |
| 194 cast_environment_->PostTask(CastEnvironment::AUDIO_DECODER, FROM_HERE, | 193 cast_environment_->PostTask( |
| 195 base::Bind(&AudioReceiver::DecodeAudioFrameThread, | 194 CastEnvironment::AUDIO_DECODER, |
| 196 base::Unretained(this), | 195 FROM_HERE, |
| 197 decoded_data.number_of_10ms_blocks, | 196 base::Bind(&AudioReceiver::DecodeAudioFrameThread, |
| 198 decoded_data.desired_frequency, | 197 base::Unretained(this), |
| 199 decoded_data.callback)); | 198 decoded_data.number_of_10ms_blocks, |
| 199 decoded_data.desired_frequency, | |
| 200 decoded_data.callback)); | |
| 200 } | 201 } |
| 201 return; | 202 return; |
| 202 } | 203 } |
| 203 | 204 |
| 204 DCHECK(audio_buffer_) << "Invalid internal state"; | 205 DCHECK(audio_buffer_) << "Invalid internal state"; |
| 205 DCHECK(!audio_decoder_) << "Invalid internal state"; | 206 DCHECK(!audio_decoder_) << "Invalid internal state"; |
| 206 | 207 |
| 207 bool duplicate = false; | 208 bool duplicate = false; |
| 208 bool complete = audio_buffer_->InsertPacket(payload_data, payload_size, | 209 bool complete = audio_buffer_->InsertPacket( |
| 209 rtp_header, &duplicate); | 210 payload_data, payload_size, rtp_header, &duplicate); |
| 210 if (duplicate) { | 211 if (duplicate) { |
| 211 cast_environment_->Logging()->InsertPacketEvent(now, | 212 cast_environment_->Logging()->InsertPacketEvent( |
| 213 now, | |
| 212 kDuplicatePacketReceived, | 214 kDuplicatePacketReceived, |
| 213 rtp_header.webrtc.header.timestamp, rtp_header.frame_id, | 215 rtp_header.webrtc.header.timestamp, |
| 214 rtp_header.packet_id, rtp_header.max_packet_id, payload_size); | 216 rtp_header.frame_id, |
| 217 rtp_header.packet_id, | |
| 218 rtp_header.max_packet_id, | |
| 219 payload_size); | |
| 215 // Duplicate packets are ignored. | 220 // Duplicate packets are ignored. |
| 216 return; | 221 return; |
| 217 } | 222 } |
| 218 if (!complete) return; // Audio frame not complete; wait for more packets. | 223 if (!complete) |
| 219 if (queued_encoded_callbacks_.empty()) return; | 224 return; // Audio frame not complete; wait for more packets. |
| 225 if (queued_encoded_callbacks_.empty()) | |
| 226 return; | |
| 220 AudioFrameEncodedCallback callback = queued_encoded_callbacks_.front(); | 227 AudioFrameEncodedCallback callback = queued_encoded_callbacks_.front(); |
| 221 queued_encoded_callbacks_.pop_front(); | 228 queued_encoded_callbacks_.pop_front(); |
| 222 cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, | 229 cast_environment_->PostTask(CastEnvironment::MAIN, |
| 223 base::Bind(&AudioReceiver::GetEncodedAudioFrame, | 230 FROM_HERE, |
| 224 weak_factory_.GetWeakPtr(), callback)); | 231 base::Bind(&AudioReceiver::GetEncodedAudioFrame, |
| 232 weak_factory_.GetWeakPtr(), | |
| 233 callback)); | |
| 225 } | 234 } |
| 226 | 235 |
| 227 void AudioReceiver::GetRawAudioFrame(int number_of_10ms_blocks, | 236 void AudioReceiver::GetRawAudioFrame( |
| 228 int desired_frequency, const AudioFrameDecodedCallback& callback) { | 237 int number_of_10ms_blocks, |
| 238 int desired_frequency, | |
| 239 const AudioFrameDecodedCallback& callback) { | |
| 229 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 240 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 230 DCHECK(audio_decoder_) << "Invalid function call in this configuration"; | 241 DCHECK(audio_decoder_) << "Invalid function call in this configuration"; |
| 231 // TODO(pwestin): we can skip this function by posting direct to the decoder. | 242 // TODO(pwestin): we can skip this function by posting direct to the decoder. |
| 232 cast_environment_->PostTask(CastEnvironment::AUDIO_DECODER, FROM_HERE, | 243 cast_environment_->PostTask(CastEnvironment::AUDIO_DECODER, |
| 233 base::Bind(&AudioReceiver::DecodeAudioFrameThread, | 244 FROM_HERE, |
| 234 base::Unretained(this), | 245 base::Bind(&AudioReceiver::DecodeAudioFrameThread, |
| 235 number_of_10ms_blocks, | 246 base::Unretained(this), |
| 236 desired_frequency, | 247 number_of_10ms_blocks, |
| 237 callback)); | 248 desired_frequency, |
| 249 callback)); | |
| 238 } | 250 } |
| 239 | 251 |
| 240 void AudioReceiver::DecodeAudioFrameThread( | 252 void AudioReceiver::DecodeAudioFrameThread( |
| 241 int number_of_10ms_blocks, | 253 int number_of_10ms_blocks, |
| 242 int desired_frequency, | 254 int desired_frequency, |
| 243 const AudioFrameDecodedCallback callback) { | 255 const AudioFrameDecodedCallback callback) { |
| 244 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::AUDIO_DECODER)); | 256 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::AUDIO_DECODER)); |
| 245 // TODO(mikhal): Allow the application to allocate this memory. | 257 // TODO(mikhal): Allow the application to allocate this memory. |
| 246 scoped_ptr<PcmAudioFrame> audio_frame(new PcmAudioFrame()); | 258 scoped_ptr<PcmAudioFrame> audio_frame(new PcmAudioFrame()); |
| 247 | 259 |
| 248 uint32 rtp_timestamp = 0; | 260 uint32 rtp_timestamp = 0; |
| 249 if (!audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks, | 261 if (!audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks, |
| 250 desired_frequency, | 262 desired_frequency, |
| 251 audio_frame.get(), | 263 audio_frame.get(), |
| 252 &rtp_timestamp)) { | 264 &rtp_timestamp)) { |
| 253 DecodedAudioCallbackData callback_data; | 265 DecodedAudioCallbackData callback_data; |
| 254 callback_data.number_of_10ms_blocks = number_of_10ms_blocks; | 266 callback_data.number_of_10ms_blocks = number_of_10ms_blocks; |
| 255 callback_data.desired_frequency = desired_frequency; | 267 callback_data.desired_frequency = desired_frequency; |
| 256 callback_data.callback = callback; | 268 callback_data.callback = callback; |
| 257 queued_decoded_callbacks_.push_back(callback_data); | 269 queued_decoded_callbacks_.push_back(callback_data); |
| 258 return; | 270 return; |
| 259 } | 271 } |
| 260 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); | 272 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
| 261 | 273 |
| 262 cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, | 274 cast_environment_->PostTask( |
| 275 CastEnvironment::MAIN, | |
| 276 FROM_HERE, | |
| 263 base::Bind(&AudioReceiver::ReturnDecodedFrameWithPlayoutDelay, | 277 base::Bind(&AudioReceiver::ReturnDecodedFrameWithPlayoutDelay, |
| 264 base::Unretained(this), base::Passed(&audio_frame), rtp_timestamp, | 278 base::Unretained(this), |
| 265 callback)); | 279 base::Passed(&audio_frame), |
| 280 rtp_timestamp, | |
| 281 callback)); | |
| 266 } | 282 } |
| 267 | 283 |
| 268 void AudioReceiver::ReturnDecodedFrameWithPlayoutDelay( | 284 void AudioReceiver::ReturnDecodedFrameWithPlayoutDelay( |
| 269 scoped_ptr<PcmAudioFrame> audio_frame, uint32 rtp_timestamp, | 285 scoped_ptr<PcmAudioFrame> audio_frame, |
| 286 uint32 rtp_timestamp, | |
| 270 const AudioFrameDecodedCallback callback) { | 287 const AudioFrameDecodedCallback callback) { |
| 271 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 288 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 272 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); | 289 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
| 273 cast_environment_->Logging()->InsertFrameEvent(now, kAudioFrameDecoded, | 290 cast_environment_->Logging()->InsertFrameEvent( |
| 274 rtp_timestamp, kFrameIdUnknown); | 291 now, kAudioFrameDecoded, rtp_timestamp, kFrameIdUnknown); |
| 275 | 292 |
| 276 base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp); | 293 base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp); |
| 277 | 294 |
| 278 cast_environment_->Logging()->InsertFrameEventWithDelay(now, | 295 cast_environment_->Logging()->InsertFrameEventWithDelay(now, |
| 279 kAudioPlayoutDelay, rtp_timestamp, kFrameIdUnknown, playout_time - now); | 296 kAudioPlayoutDelay, |
| 297 rtp_timestamp, | |
| 298 kFrameIdUnknown, | |
| 299 playout_time - now); | |
| 280 | 300 |
| 281 // Frame is ready - Send back to the caller. | 301 // Frame is ready - Send back to the caller. |
| 282 cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, | 302 cast_environment_->PostTask( |
| 303 CastEnvironment::MAIN, | |
| 304 FROM_HERE, | |
| 283 base::Bind(callback, base::Passed(&audio_frame), playout_time)); | 305 base::Bind(callback, base::Passed(&audio_frame), playout_time)); |
| 284 } | 306 } |
| 285 | 307 |
| 286 void AudioReceiver::PlayoutTimeout() { | 308 void AudioReceiver::PlayoutTimeout() { |
| 287 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 309 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 288 DCHECK(audio_buffer_) << "Invalid function call in this configuration"; | 310 DCHECK(audio_buffer_) << "Invalid function call in this configuration"; |
| 289 if (queued_encoded_callbacks_.empty()) { | 311 if (queued_encoded_callbacks_.empty()) { |
| 290 // Already released by incoming packet. | 312 // Already released by incoming packet. |
| 291 return; | 313 return; |
| 292 } | 314 } |
| 293 uint32 rtp_timestamp = 0; | 315 uint32 rtp_timestamp = 0; |
| 294 bool next_frame = false; | 316 bool next_frame = false; |
| 295 scoped_ptr<transport::EncodedAudioFrame> encoded_frame( | 317 scoped_ptr<transport::EncodedAudioFrame> encoded_frame( |
| 296 new transport::EncodedAudioFrame()); | 318 new transport::EncodedAudioFrame()); |
| 297 | 319 |
| 298 if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(), | 320 if (!audio_buffer_->GetEncodedAudioFrame( |
| 299 &rtp_timestamp, &next_frame)) { | 321 encoded_frame.get(), &rtp_timestamp, &next_frame)) { |
| 300 // We have no audio frames. Wait for new packet(s). | 322 // We have no audio frames. Wait for new packet(s). |
| 301 // Since the application can post multiple AudioFrameEncodedCallback and | 323 // Since the application can post multiple AudioFrameEncodedCallback and |
| 302 // we only check the next frame to play out we might have multiple timeout | 324 // we only check the next frame to play out we might have multiple timeout |
| 303 // events firing after each other; however this should be a rare event. | 325 // events firing after each other; however this should be a rare event. |
| 304 VLOG(1) << "Failed to retrieve a complete frame at this point in time"; | 326 VLOG(1) << "Failed to retrieve a complete frame at this point in time"; |
| 305 return; | 327 return; |
| 306 } | 328 } |
| 307 | 329 |
| 308 if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) { | 330 if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) { |
| 309 // Logging already done. | 331 // Logging already done. |
| 310 return; | 332 return; |
| 311 } | 333 } |
| 312 | 334 |
| 313 if (PostEncodedAudioFrame(queued_encoded_callbacks_.front(), rtp_timestamp, | 335 if (PostEncodedAudioFrame(queued_encoded_callbacks_.front(), |
| 314 next_frame, &encoded_frame)) { | 336 rtp_timestamp, |
| 337 next_frame, | |
| 338 &encoded_frame)) { | |
| 315 // Call succeeded; remove callback from list. | 339 // Call succeeded; remove callback from list. |
| 316 queued_encoded_callbacks_.pop_front(); | 340 queued_encoded_callbacks_.pop_front(); |
| 317 } | 341 } |
| 318 } | 342 } |
| 319 | 343 |
| 320 void AudioReceiver::GetEncodedAudioFrame( | 344 void AudioReceiver::GetEncodedAudioFrame( |
| 321 const AudioFrameEncodedCallback& callback) { | 345 const AudioFrameEncodedCallback& callback) { |
| 322 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 346 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 323 DCHECK(audio_buffer_) << "Invalid function call in this configuration"; | 347 DCHECK(audio_buffer_) << "Invalid function call in this configuration"; |
| 324 | 348 |
| 325 uint32 rtp_timestamp = 0; | 349 uint32 rtp_timestamp = 0; |
| 326 bool next_frame = false; | 350 bool next_frame = false; |
| 327 scoped_ptr<transport::EncodedAudioFrame> encoded_frame( | 351 scoped_ptr<transport::EncodedAudioFrame> encoded_frame( |
| 328 new transport::EncodedAudioFrame()); | 352 new transport::EncodedAudioFrame()); |
| 329 | 353 |
| 330 if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(), | 354 if (!audio_buffer_->GetEncodedAudioFrame( |
| 331 &rtp_timestamp, &next_frame)) { | 355 encoded_frame.get(), &rtp_timestamp, &next_frame)) { |
| 332 // We have no audio frames. Wait for new packet(s). | 356 // We have no audio frames. Wait for new packet(s). |
| 333 VLOG(1) << "Wait for more audio packets in frame"; | 357 VLOG(1) << "Wait for more audio packets in frame"; |
| 334 queued_encoded_callbacks_.push_back(callback); | 358 queued_encoded_callbacks_.push_back(callback); |
| 335 return; | 359 return; |
| 336 } | 360 } |
| 337 if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) { | 361 if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) { |
| 338 // Logging already done. | 362 // Logging already done. |
| 339 queued_encoded_callbacks_.push_back(callback); | 363 queued_encoded_callbacks_.push_back(callback); |
| 340 return; | 364 return; |
| 341 } | 365 } |
| 342 if (!PostEncodedAudioFrame(callback, rtp_timestamp, next_frame, | 366 if (!PostEncodedAudioFrame( |
| 343 &encoded_frame)) { | 367 callback, rtp_timestamp, next_frame, &encoded_frame)) { |
| 344 // We have an audio frame; however we are missing packets and we have time | 368 // We have an audio frame; however we are missing packets and we have time |
| 345 // to wait for new packet(s). | 369 // to wait for new packet(s). |
| 346 queued_encoded_callbacks_.push_back(callback); | 370 queued_encoded_callbacks_.push_back(callback); |
| 347 } | 371 } |
| 348 } | 372 } |
| 349 | 373 |
| 350 bool AudioReceiver::PostEncodedAudioFrame( | 374 bool AudioReceiver::PostEncodedAudioFrame( |
| 351 const AudioFrameEncodedCallback& callback, | 375 const AudioFrameEncodedCallback& callback, |
| 352 uint32 rtp_timestamp, | 376 uint32 rtp_timestamp, |
| 353 bool next_frame, | 377 bool next_frame, |
| 354 scoped_ptr<transport::EncodedAudioFrame>* encoded_frame) { | 378 scoped_ptr<transport::EncodedAudioFrame>* encoded_frame) { |
| 355 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 379 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 356 DCHECK(audio_buffer_) << "Invalid function call in this configuration"; | 380 DCHECK(audio_buffer_) << "Invalid function call in this configuration"; |
| 357 | 381 |
| 358 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); | 382 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
| 359 base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp); | 383 base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp); |
| 360 base::TimeDelta time_until_playout = playout_time - now; | 384 base::TimeDelta time_until_playout = playout_time - now; |
| 361 base::TimeDelta min_wait_delta = | 385 base::TimeDelta min_wait_delta = |
| 362 base::TimeDelta::FromMilliseconds(kMaxAudioFrameWaitMs); | 386 base::TimeDelta::FromMilliseconds(kMaxAudioFrameWaitMs); |
| 363 | 387 |
| 364 if (!next_frame && (time_until_playout > min_wait_delta)) { | 388 if (!next_frame && (time_until_playout > min_wait_delta)) { |
| 365 base::TimeDelta time_until_release = time_until_playout - min_wait_delta; | 389 base::TimeDelta time_until_release = time_until_playout - min_wait_delta; |
| 366 cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE, | 390 cast_environment_->PostDelayedTask( |
| 391 CastEnvironment::MAIN, | |
| 392 FROM_HERE, | |
| 367 base::Bind(&AudioReceiver::PlayoutTimeout, weak_factory_.GetWeakPtr()), | 393 base::Bind(&AudioReceiver::PlayoutTimeout, weak_factory_.GetWeakPtr()), |
| 368 time_until_release); | 394 time_until_release); |
| 369 VLOG(1) << "Wait until time to playout:" | 395 VLOG(1) << "Wait until time to playout:" |
| 370 << time_until_release.InMilliseconds(); | 396 << time_until_release.InMilliseconds(); |
| 371 return false; | 397 return false; |
| 372 } | 398 } |
| 373 (*encoded_frame)->codec = codec_; | 399 (*encoded_frame)->codec = codec_; |
| 374 audio_buffer_->ReleaseFrame((*encoded_frame)->frame_id); | 400 audio_buffer_->ReleaseFrame((*encoded_frame)->frame_id); |
| 375 | 401 |
| 376 cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, | 402 cast_environment_->PostTask( |
| 403 CastEnvironment::MAIN, | |
| 404 FROM_HERE, | |
| 377 base::Bind(callback, base::Passed(encoded_frame), playout_time)); | 405 base::Bind(callback, base::Passed(encoded_frame), playout_time)); |
| 378 return true; | 406 return true; |
| 379 } | 407 } |
| 380 | 408 |
| 381 void AudioReceiver::IncomingPacket(scoped_ptr<Packet> packet) { | 409 void AudioReceiver::IncomingPacket(scoped_ptr<Packet> packet) { |
| 382 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 410 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 383 bool rtcp_packet = Rtcp::IsRtcpPacket(&packet->front(), packet->size()); | 411 bool rtcp_packet = Rtcp::IsRtcpPacket(&packet->front(), packet->size()); |
| 384 if (!rtcp_packet) { | 412 if (!rtcp_packet) { |
| 385 rtp_receiver_->ReceivedPacket(&packet->front(), packet->size()); | 413 rtp_receiver_->ReceivedPacket(&packet->front(), packet->size()); |
| 386 } else { | 414 } else { |
| (...skipping 21 matching lines...) | |
| 408 event_log_message.event_timestamp = event_it->second.timestamp; | 436 event_log_message.event_timestamp = event_it->second.timestamp; |
| 409 event_log_message.delay_delta = event_it->second.delay_delta; | 437 event_log_message.delay_delta = event_it->second.delay_delta; |
| 410 event_log_message.packet_id = event_it->second.packet_id; | 438 event_log_message.packet_id = event_it->second.packet_id; |
| 411 frame_log.event_log_messages_.push_back(event_log_message); | 439 frame_log.event_log_messages_.push_back(event_log_message); |
| 412 } | 440 } |
| 413 receiver_log.push_back(frame_log); | 441 receiver_log.push_back(frame_log); |
| 414 audio_logs.erase(rtp_timestamp); | 442 audio_logs.erase(rtp_timestamp); |
| 415 } | 443 } |
| 416 | 444 |
| 417 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); | 445 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
| 418 cast_environment_->Logging()->InsertGenericEvent(now, kAudioAckSent, | 446 cast_environment_->Logging()->InsertGenericEvent( |
| 419 cast_message.ack_frame_id_); | 447 now, kAudioAckSent, cast_message.ack_frame_id_); |
| 420 | 448 |
| 421 rtcp_->SendRtcpFromRtpReceiver(&cast_message, &receiver_log); | 449 rtcp_->SendRtcpFromRtpReceiver(&cast_message, &receiver_log); |
| 422 } | 450 } |
| 423 | 451 |
| 424 base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now, | 452 base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now, |
| 425 uint32 rtp_timestamp) { | 453 uint32 rtp_timestamp) { |
| 426 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 454 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 427 // Sender's time in ms when this frame was recorded. | 455 // Sender's time in ms when this frame was recorded. |
| 428 // Note: the sender's clock and our local clock might not be synced. | 456 // Note: the sender's clock and our local clock might not be synced. |
| 429 base::TimeTicks rtp_timestamp_in_ticks; | 457 base::TimeTicks rtp_timestamp_in_ticks; |
| 430 base::TimeTicks playout_time; | 458 base::TimeTicks playout_time; |
| 431 if (time_offset_ == base::TimeDelta()) { | 459 if (time_offset_ == base::TimeDelta()) { |
| 432 if (rtcp_->RtpTimestampInSenderTime(frequency_, | 460 if (rtcp_->RtpTimestampInSenderTime(frequency_, |
| 433 first_incoming_rtp_timestamp_, | 461 first_incoming_rtp_timestamp_, |
| 434 &rtp_timestamp_in_ticks)) { | 462 &rtp_timestamp_in_ticks)) { |
| 435 time_offset_ = time_first_incoming_packet_ - rtp_timestamp_in_ticks; | 463 time_offset_ = time_first_incoming_packet_ - rtp_timestamp_in_ticks; |
| 436 } else { | 464 } else { |
| 437 // We have not received any RTCP to sync the stream; play it out as soon as | 465 // We have not received any RTCP to sync the stream; play it out as soon as |
| 438 // possible. | 466 // possible. |
| 439 uint32 rtp_timestamp_diff = rtp_timestamp - first_incoming_rtp_timestamp_; | 467 uint32 rtp_timestamp_diff = rtp_timestamp - first_incoming_rtp_timestamp_; |
| 440 | 468 |
| 441 int frequency_khz = frequency_ / 1000; | 469 int frequency_khz = frequency_ / 1000; |
| 442 base::TimeDelta rtp_time_diff_delta = | 470 base::TimeDelta rtp_time_diff_delta = |
| 443 base::TimeDelta::FromMilliseconds(rtp_timestamp_diff / frequency_khz); | 471 base::TimeDelta::FromMilliseconds(rtp_timestamp_diff / frequency_khz); |
| 444 base::TimeDelta time_diff_delta = now - time_first_incoming_packet_; | 472 base::TimeDelta time_diff_delta = now - time_first_incoming_packet_; |
| 445 | 473 |
| 446 playout_time = now + std::max(rtp_time_diff_delta - time_diff_delta, | 474 playout_time = now + std::max(rtp_time_diff_delta - time_diff_delta, |
| 447 base::TimeDelta()); | 475 base::TimeDelta()); |
| 448 } | 476 } |
| 449 } | 477 } |
| 450 if (playout_time.is_null()) { | 478 if (playout_time.is_null()) { |
| 451 // This can fail if we have not received any RTCP packets in a long time. | 479 // This can fail if we have not received any RTCP packets in a long time. |
| 452 playout_time = rtcp_->RtpTimestampInSenderTime(frequency_, rtp_timestamp, | 480 playout_time = |
| 453 &rtp_timestamp_in_ticks) ? | 481 rtcp_->RtpTimestampInSenderTime( |
| 454 rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_ : now; | 482 frequency_, rtp_timestamp, &rtp_timestamp_in_ticks) |
| 483 ? rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_ | |

hubbe (2014/01/29 20:07:39): Can we just write this as a regular if statement i
mikhal1 (2014/01/29 21:02:38): Done.
(An if/else sketch of this ternary appears after the GetPlayoutTime diff, below.)

| 484 : now; | |
| 455 } | 485 } |
| 456 // Don't allow the playout time to go backwards. | 486 // Don't allow the playout time to go backwards. |
| 457 if (last_playout_time_ > playout_time) | 487 if (last_playout_time_ > playout_time) |
| 458 playout_time = last_playout_time_; | 488 playout_time = last_playout_time_; |
| 459 last_playout_time_ = playout_time; | 489 last_playout_time_ = playout_time; |
| 460 return playout_time; | 490 return playout_time; |
| 461 } | 491 } |
| 462 | 492 |
| 463 bool AudioReceiver::DecryptAudioFrame( | 493 bool AudioReceiver::DecryptAudioFrame( |
| 464 scoped_ptr<transport::EncodedAudioFrame>* audio_frame) { | 494 scoped_ptr<transport::EncodedAudioFrame>* audio_frame) { |
| 465 DCHECK(decryptor_) << "Invalid state"; | 495 DCHECK(decryptor_) << "Invalid state"; |
| 466 | 496 |
| 467 if (!decryptor_->SetCounter(GetAesNonce((*audio_frame)->frame_id, | 497 if (!decryptor_->SetCounter( |
| 468 iv_mask_))) { | 498 GetAesNonce((*audio_frame)->frame_id, iv_mask_))) { |
| 469 NOTREACHED() << "Failed to set counter"; | 499 NOTREACHED() << "Failed to set counter"; |
| 470 return false; | 500 return false; |
| 471 } | 501 } |
| 472 std::string decrypted_audio_data; | 502 std::string decrypted_audio_data; |
| 473 if (!decryptor_->Decrypt((*audio_frame)->data, &decrypted_audio_data)) { | 503 if (!decryptor_->Decrypt((*audio_frame)->data, &decrypted_audio_data)) { |
| 474 VLOG(1) << "Decryption error"; | 504 VLOG(1) << "Decryption error"; |
| 475 // Give up on this frame, release it from jitter buffer. | 505 // Give up on this frame, release it from jitter buffer. |
| 476 audio_buffer_->ReleaseFrame((*audio_frame)->frame_id); | 506 audio_buffer_->ReleaseFrame((*audio_frame)->frame_id); |
| 477 return false; | 507 return false; |
| 478 } | 508 } |
| 479 (*audio_frame)->data.swap(decrypted_audio_data); | 509 (*audio_frame)->data.swap(decrypted_audio_data); |
| 480 return true; | 510 return true; |
| 481 } | 511 } |
| 482 | 512 |
| 483 void AudioReceiver::ScheduleNextRtcpReport() { | 513 void AudioReceiver::ScheduleNextRtcpReport() { |
| 484 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 514 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 485 base::TimeDelta time_to_send = rtcp_->TimeToSendNextRtcpReport() - | 515 base::TimeDelta time_to_send = rtcp_->TimeToSendNextRtcpReport() - |
| 486 cast_environment_->Clock()->NowTicks(); | 516 cast_environment_->Clock()->NowTicks(); |
| 487 | 517 |
| 488 time_to_send = std::max(time_to_send, | 518 time_to_send = std::max( |
| 489 base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs)); | 519 time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs)); |
| 490 | 520 |
| 491 cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE, | 521 cast_environment_->PostDelayedTask( |
| 522 CastEnvironment::MAIN, | |
| 523 FROM_HERE, | |
| 492 base::Bind(&AudioReceiver::SendNextRtcpReport, | 524 base::Bind(&AudioReceiver::SendNextRtcpReport, |
| 493 weak_factory_.GetWeakPtr()), time_to_send); | 525 weak_factory_.GetWeakPtr()), |
| 526 time_to_send); | |
| 494 } | 527 } |
| 495 | 528 |
| 496 void AudioReceiver::SendNextRtcpReport() { | 529 void AudioReceiver::SendNextRtcpReport() { |
| 497 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 530 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 498 // TODO(pwestin): add logging. | 531 // TODO(pwestin): add logging. |
| 499 rtcp_->SendRtcpFromRtpReceiver(NULL, NULL); | 532 rtcp_->SendRtcpFromRtpReceiver(NULL, NULL); |
| 500 ScheduleNextRtcpReport(); | 533 ScheduleNextRtcpReport(); |
| 501 } | 534 } |
| 502 | 535 |
| 503 // Cast messages should be sent within a maximum interval. Schedule a call | 536 // Cast messages should be sent within a maximum interval. Schedule a call |
| 504 // if not triggered elsewhere, e.g. by the cast message_builder. | 537 // if not triggered elsewhere, e.g. by the cast message_builder. |
| 505 void AudioReceiver::ScheduleNextCastMessage() { | 538 void AudioReceiver::ScheduleNextCastMessage() { |
| 506 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 539 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 507 base::TimeTicks send_time; | 540 base::TimeTicks send_time; |
| 508 if (audio_buffer_) { | 541 if (audio_buffer_) { |
| 509 audio_buffer_->TimeToSendNextCastMessage(&send_time); | 542 audio_buffer_->TimeToSendNextCastMessage(&send_time); |
| 510 } else if (audio_decoder_) { | 543 } else if (audio_decoder_) { |
| 511 audio_decoder_->TimeToSendNextCastMessage(&send_time); | 544 audio_decoder_->TimeToSendNextCastMessage(&send_time); |
| 512 } else { | 545 } else { |
| 513 NOTREACHED(); | 546 NOTREACHED(); |
| 514 } | 547 } |
| 515 base::TimeDelta time_to_send = send_time - | 548 base::TimeDelta time_to_send = |
| 516 cast_environment_->Clock()->NowTicks(); | 549 send_time - cast_environment_->Clock()->NowTicks(); |
| 517 time_to_send = std::max(time_to_send, | 550 time_to_send = std::max( |
| 518 base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs)); | 551 time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs)); |
| 519 cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE, | 552 cast_environment_->PostDelayedTask( |
| 553 CastEnvironment::MAIN, | |
| 554 FROM_HERE, | |
| 520 base::Bind(&AudioReceiver::SendNextCastMessage, | 555 base::Bind(&AudioReceiver::SendNextCastMessage, |
| 521 weak_factory_.GetWeakPtr()), time_to_send); | 556 weak_factory_.GetWeakPtr()), |
| 557 time_to_send); | |
| 522 } | 558 } |
| 523 | 559 |
| 524 void AudioReceiver::SendNextCastMessage() { | 560 void AudioReceiver::SendNextCastMessage() { |
| 525 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 561 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 526 | 562 |
| 527 if (audio_buffer_) { | 563 if (audio_buffer_) { |
| 528 // Will only send a message if it is time. | 564 // Will only send a message if it is time. |
| 529 audio_buffer_->SendCastMessage(); | 565 audio_buffer_->SendCastMessage(); |
| 530 } | 566 } |
| 531 if (audio_decoder_) { | 567 if (audio_decoder_) { |
| 532 // Will only send a message if it is time. | 568 // Will only send a message if it is time. |
| 533 audio_decoder_->SendCastMessage(); | 569 audio_decoder_->SendCastMessage(); |
| 534 } | 570 } |
| 535 ScheduleNextCastMessage(); | 571 ScheduleNextCastMessage(); |
| 536 } | 572 } |
| 537 | 573 |
| 538 } // namespace cast | 574 } // namespace cast |
| 539 } // namespace media | 575 } // namespace media |