Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/ffmpeg_demuxer.h" | 5 #include "media/filters/ffmpeg_demuxer.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <string> | 8 #include <string> |
| 9 | 9 |
| 10 #include "base/base64.h" | 10 #include "base/base64.h" |
| 11 #include "base/bind.h" | 11 #include "base/bind.h" |
| 12 #include "base/callback.h" | 12 #include "base/callback.h" |
| 13 #include "base/callback_helpers.h" | 13 #include "base/callback_helpers.h" |
| 14 #include "base/command_line.h" | |
| 15 #include "base/memory/scoped_ptr.h" | 14 #include "base/memory/scoped_ptr.h" |
| 16 #include "base/message_loop/message_loop.h" | 15 #include "base/message_loop/message_loop.h" |
| 17 #include "base/metrics/sparse_histogram.h" | 16 #include "base/metrics/sparse_histogram.h" |
| 18 #include "base/stl_util.h" | 17 //#include "base/stl_util.h" |
> acolwell GONE FROM CHROMIUM (2013/10/14 20:42:24): nit: remove
> Matthew Heaney (Chromium) (2013/10/17 05:46:44): Done.
| 19 #include "base/strings/string_util.h" | 18 #include "base/strings/string_util.h" |
| 20 #include "base/strings/stringprintf.h" | 19 #include "base/strings/stringprintf.h" |
| 21 #include "base/task_runner_util.h" | 20 #include "base/task_runner_util.h" |
| 22 #include "base/time/time.h" | 21 #include "base/time/time.h" |
| 23 #include "media/base/audio_decoder_config.h" | 22 #include "media/base/audio_decoder_config.h" |
| 24 #include "media/base/bind_to_loop.h" | 23 #include "media/base/bind_to_loop.h" |
| 25 #include "media/base/decoder_buffer.h" | 24 #include "media/base/decoder_buffer.h" |
| 26 #include "media/base/decrypt_config.h" | 25 #include "media/base/decrypt_config.h" |
| 27 #include "media/base/limits.h" | 26 #include "media/base/limits.h" |
| 28 #include "media/base/media_log.h" | 27 #include "media/base/media_log.h" |
| 29 #include "media/base/media_switches.h" | |
| 30 #include "media/base/video_decoder_config.h" | 28 #include "media/base/video_decoder_config.h" |
| 31 #include "media/ffmpeg/ffmpeg_common.h" | 29 #include "media/ffmpeg/ffmpeg_common.h" |
| 32 #include "media/filters/ffmpeg_glue.h" | 30 #include "media/filters/ffmpeg_glue.h" |
| 33 #include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h" | 31 #include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h" |
| 34 #include "media/webm/webm_crypto_helpers.h" | 32 #include "media/webm/webm_crypto_helpers.h" |
| 35 | 33 |
| 36 namespace media { | 34 namespace media { |
| 37 | 35 |
| 38 // | 36 // |
| 39 // FFmpegDemuxerStream | 37 // FFmpegDemuxerStream |
| (...skipping 17 matching lines...) | |
| 57 case AVMEDIA_TYPE_AUDIO: | 55 case AVMEDIA_TYPE_AUDIO: |
| 58 type_ = AUDIO; | 56 type_ = AUDIO; |
| 59 AVStreamToAudioDecoderConfig(stream, &audio_config_, true); | 57 AVStreamToAudioDecoderConfig(stream, &audio_config_, true); |
| 60 is_encrypted = audio_config_.is_encrypted(); | 58 is_encrypted = audio_config_.is_encrypted(); |
| 61 break; | 59 break; |
| 62 case AVMEDIA_TYPE_VIDEO: | 60 case AVMEDIA_TYPE_VIDEO: |
| 63 type_ = VIDEO; | 61 type_ = VIDEO; |
| 64 AVStreamToVideoDecoderConfig(stream, &video_config_, true); | 62 AVStreamToVideoDecoderConfig(stream, &video_config_, true); |
| 65 is_encrypted = video_config_.is_encrypted(); | 63 is_encrypted = video_config_.is_encrypted(); |
| 66 break; | 64 break; |
| | 65 case AVMEDIA_TYPE_SUBTITLE: |
| | 66 type_ = TEXT; |
| | 67 break; |
| 67 default: | 68 default: |
| 68 NOTREACHED(); | 69 NOTREACHED(); |
| 69 break; | 70 break; |
| 70 } | 71 } |
| 71 | 72 |
| 72 // Calculate the duration. | 73 // Calculate the duration. |
| 73 duration_ = ConvertStreamTimestamp(stream->time_base, stream->duration); | 74 duration_ = ConvertStreamTimestamp(stream->time_base, stream->duration); |
| 74 | 75 |
| 75 if (stream_->codec->codec_id == AV_CODEC_ID_H264) { | 76 if (stream_->codec->codec_id == AV_CODEC_ID_H264) { |
| 76 bitstream_converter_.reset( | 77 bitstream_converter_.reset( |
| (...skipping 26 matching lines...) | |
| 103 NOTREACHED() << "Attempted to enqueue packet on a stopped stream"; | 104 NOTREACHED() << "Attempted to enqueue packet on a stopped stream"; |
| 104 return; | 105 return; |
| 105 } | 106 } |
| 106 | 107 |
| 107 // Convert the packet if there is a bitstream filter. | 108 // Convert the packet if there is a bitstream filter. |
| 108 if (packet->data && bitstream_converter_enabled_ && | 109 if (packet->data && bitstream_converter_enabled_ && |
| 109 !bitstream_converter_->ConvertPacket(packet.get())) { | 110 !bitstream_converter_->ConvertPacket(packet.get())) { |
| 110 LOG(ERROR) << "Format conversion failed."; | 111 LOG(ERROR) << "Format conversion failed."; |
| 111 } | 112 } |
| 112 | 113 |
| 113 // Get side data if any. For now, the only type of side_data is VP8 Alpha. We | 114 scoped_refptr<DecoderBuffer> buffer; |
| 114 // keep this generic so that other side_data types in the future can be | 115 |
| 115 // handled the same way as well. | 116 // Get side data if any. For now, the only types of side_data are VP8 Alpha, |
| | 117 // and WebVTT id and settings. We keep this generic so that other side_data |
| | 118 // types in the future can be handled the same way as well. |
| 116 av_packet_split_side_data(packet.get()); | 119 av_packet_split_side_data(packet.get()); |
| 117 int side_data_size = 0; | 120 if (type() == DemuxerStream::TEXT) { |
| 118 uint8* side_data = av_packet_get_side_data( | 121 int id_size = 0; |
| 119 packet.get(), | 122 uint8* id_data = av_packet_get_side_data( |
| 120 AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, | 123 packet.get(), |
| 121 &side_data_size); | 124 AV_PKT_DATA_WEBVTT_IDENTIFIER, |
| | 125 &id_size); |
| 122 | 126 |
| 123 // If a packet is returned by FFmpeg's av_parser_parse2() the packet will | 127 int settings_size = 0; |
| 124 // reference inner memory of FFmpeg. As such we should transfer the packet | 128 uint8* settings_data = av_packet_get_side_data( |
| 125 // into memory we control. | 129 packet.get(), |
| 126 scoped_refptr<DecoderBuffer> buffer; | 130 AV_PKT_DATA_WEBVTT_SETTINGS, |
| 127 if (side_data_size > 0) { | 131 &settings_size); |
| | 132 |
| | 133 // The DecoderBuffer only supports a single side data item. In the case of |
| | 134 // a WebVTT cue, we can have potentially two side data items. In order to |
| | 135 // avoid disrupting DecoderBuffer any more than we need to, we copy both |
| | 136 // side data items onto a single one, and terminate each with a NUL marker. |
| | 137 std::vector<uint8> side_data; |
| | 138 side_data.reserve(id_size + 1 + settings_size + 1); |
| | 139 side_data.insert(side_data.end(), |
| | 140 id_data, id_data + id_size); |
| | 141 side_data.push_back(0); |
| | 142 side_data.insert(side_data.end(), |
| | 143 settings_data, settings_data + settings_size); |
| | 144 side_data.push_back(0); |
| | 145 |
| 128 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size, | 146 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size, |
| 129 side_data, side_data_size); | 147 side_data.data(), side_data.size()); |
| 130 } else { | 148 } else { |
| 131 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size); | 149 int side_data_size = 0; |
| | 150 uint8* side_data = av_packet_get_side_data( |
| | 151 packet.get(), |
| | 152 AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, |
| | 153 &side_data_size); |
| | 154 |
| | 155 // If a packet is returned by FFmpeg's av_parser_parse2() the packet will |
| | 156 // reference inner memory of FFmpeg. As such we should transfer the packet |
| | 157 // into memory we control. |
| | 158 if (side_data_size > 0) { |
| | 159 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size, |
| | 160 side_data, side_data_size); |
| | 161 } else { |
| | 162 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size); |
| | 163 } |
| 132 } | 164 } |
| 133 | 165 |
| 134 if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) || | 166 if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) || |
| 135 (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) { | 167 (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) { |
| 136 scoped_ptr<DecryptConfig> config(WebMCreateDecryptConfig( | 168 scoped_ptr<DecryptConfig> config(WebMCreateDecryptConfig( |
| 137 packet->data, packet->size, | 169 packet->data, packet->size, |
| 138 reinterpret_cast<const uint8*>(encryption_key_id_.data()), | 170 reinterpret_cast<const uint8*>(encryption_key_id_.data()), |
| 139 encryption_key_id_.size())); | 171 encryption_key_id_.size())); |
| 140 if (!config) | 172 if (!config) |
| 141 LOG(ERROR) << "Creation of DecryptConfig failed."; | 173 LOG(ERROR) << "Creation of DecryptConfig failed."; |
| 142 buffer->set_decrypt_config(config.Pass()); | 174 buffer->set_decrypt_config(config.Pass()); |
| 143 } | 175 } |
| 144 | 176 |
| 145 buffer->set_timestamp(ConvertStreamTimestamp( | 177 buffer->set_timestamp(ConvertStreamTimestamp( |
| 146 stream_->time_base, packet->pts)); | 178 stream_->time_base, packet->pts)); |
| 147 buffer->set_duration(ConvertStreamTimestamp( | 179 buffer->set_duration(ConvertStreamTimestamp( |
| (...skipping 117 matching lines...) | |
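
The comment at new lines 133-136 describes the packing scheme for WebVTT side data: the cue identifier and the cue settings are concatenated into one buffer, each terminated by a NUL byte, so that DecoderBuffer only has to carry a single side-data item. The sketch below shows how a consumer could unpack that buffer. It is not part of this CL; `SplitWebVTTSideData` is a hypothetical helper, and `uint8_t` stands in for Chromium's `uint8` typedef.

```cpp
// Minimal sketch (not from this CL): unpack side data laid out as
// [identifier bytes] '\0' [settings bytes] '\0'.
#include <cstdint>
#include <string>

static void SplitWebVTTSideData(const uint8_t* side_data, int side_data_size,
                                std::string* id, std::string* settings) {
  const uint8_t* const end = side_data + side_data_size;
  const uint8_t* p = side_data;
  while (p < end && *p != '\0')
    ++p;  // Scan up to the NUL that terminates the cue identifier.
  id->assign(reinterpret_cast<const char*>(side_data), p - side_data);
  if (p < end)
    ++p;  // Step past the identifier's NUL terminator.
  const uint8_t* const settings_begin = p;
  while (p < end && *p != '\0')
    ++p;  // Scan up to the NUL that terminates the settings string.
  settings->assign(reinterpret_cast<const char*>(settings_begin),
                   p - settings_begin);
}
```

Packing both items into one buffer avoids adding a second side-data slot to DecoderBuffer, at the cost of this small parsing step on the consumer side.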
| 265 // TODO(scherkus): Remove early return and reenable time-based capacity | 297 // TODO(scherkus): Remove early return and reenable time-based capacity |
| 266 // after our data sources support canceling/concurrent reads, see | 298 // after our data sources support canceling/concurrent reads, see |
| 267 // http://crbug.com/165762 for details. | 299 // http://crbug.com/165762 for details. |
| 268 return !read_cb_.is_null(); | 300 return !read_cb_.is_null(); |
| 269 | 301 |
| 270 // Try to have one second's worth of encoded data per stream. | 302 // Try to have one second's worth of encoded data per stream. |
| 271 const base::TimeDelta kCapacity = base::TimeDelta::FromSeconds(1); | 303 const base::TimeDelta kCapacity = base::TimeDelta::FromSeconds(1); |
| 272 return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity; | 304 return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity; |
| 273 } | 305 } |
| 274 | 306 |
| | 307 TextKind FFmpegDemuxerStream::GetTextKind() const { |
| | 308 DCHECK_EQ(type_, DemuxerStream::TEXT); |
| | 309 |
| | 310 if (stream_->disposition & AV_DISPOSITION_CAPTIONS) |
| | 311 return kTextCaptions; |
| | 312 |
| | 313 if (stream_->disposition & AV_DISPOSITION_DESCRIPTIONS) |
| | 314 return kTextDescriptions; |
| | 315 |
| | 316 if (stream_->disposition & AV_DISPOSITION_METADATA) |
| | 317 return kTextMetadata; |
| | 318 |
| | 319 return kTextSubtitles; |
| | 320 } |
| | 321 |
| | 322 std::string FFmpegDemuxerStream::GetMetadata(const char* key) const { |
| | 323 const AVDictionaryEntry* entry = |
| | 324 av_dict_get(stream_->metadata, key, NULL, 0); |
| | 325 return (entry == NULL || entry->value == NULL) ? "" : entry->value; |
| | 326 } |
| | 327 |
| 275 // static | 328 // static |
| 276 base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp( | 329 base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp( |
| 277 const AVRational& time_base, int64 timestamp) { | 330 const AVRational& time_base, int64 timestamp) { |
| 278 if (timestamp == static_cast<int64>(AV_NOPTS_VALUE)) | 331 if (timestamp == static_cast<int64>(AV_NOPTS_VALUE)) |
| 279 return kNoTimestamp(); | 332 return kNoTimestamp(); |
| 280 | 333 |
| 281 return ConvertFromTimeBase(time_base, timestamp); | 334 return ConvertFromTimeBase(time_base, timestamp); |
| 282 } | 335 } |
| 283 | 336 |
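
ConvertStreamTimestamp() (new line 329) only guards against AV_NOPTS_VALUE; the actual scaling is done by ConvertFromTimeBase() from media/ffmpeg/ffmpeg_common.h, included above. For orientation, here is a rough, illustrative-only version of that scaling; it ignores the overflow and rounding handling of the real helper and is not the Chromium implementation.

```cpp
// Illustrative only: converting an FFmpeg stream timestamp (in units of
// time_base_num / time_base_den seconds) to microseconds.
#include <cstdint>

static int64_t StreamTimestampToMicroseconds(int64_t timestamp,
                                             int time_base_num,
                                             int time_base_den) {
  // A timestamp of N in a num/den time base represents N * num / den seconds.
  return timestamp * 1000000 * time_base_num / time_base_den;
}

// Example: WebM uses a 1/1000 time base, so a packet pts of 2500 maps to
// StreamTimestampToMicroseconds(2500, 1, 1000) == 2500000, i.e. 2.5 seconds.
```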
| 284 // | 337 // |
| 285 // FFmpegDemuxer | 338 // FFmpegDemuxer |
| 286 // | 339 // |
| 287 FFmpegDemuxer::FFmpegDemuxer( | 340 FFmpegDemuxer::FFmpegDemuxer( |
| 288 const scoped_refptr<base::MessageLoopProxy>& message_loop, | 341 const scoped_refptr<base::MessageLoopProxy>& message_loop, |
| 289 DataSource* data_source, | 342 DataSource* data_source, |
| 290 const NeedKeyCB& need_key_cb, | 343 const NeedKeyCB& need_key_cb, |
| | 344 bool text_enabled, |
| 291 const scoped_refptr<MediaLog>& media_log) | 345 const scoped_refptr<MediaLog>& media_log) |
| 292 : host_(NULL), | 346 : host_(NULL), |
| 293 message_loop_(message_loop), | 347 message_loop_(message_loop), |
| 294 weak_factory_(this), | 348 weak_factory_(this), |
| 295 blocking_thread_("FFmpegDemuxer"), | 349 blocking_thread_("FFmpegDemuxer"), |
| 296 pending_read_(false), | 350 pending_read_(false), |
| 297 pending_seek_(false), | 351 pending_seek_(false), |
| 298 data_source_(data_source), | 352 data_source_(data_source), |
| 299 media_log_(media_log), | 353 media_log_(media_log), |
| 300 bitrate_(0), | 354 bitrate_(0), |
| 301 start_time_(kNoTimestamp()), | 355 start_time_(kNoTimestamp()), |
| 302 audio_disabled_(false), | 356 audio_disabled_(false), |
| | 357 text_enabled_(text_enabled), |
| 303 duration_known_(false), | 358 duration_known_(false), |
| 304 url_protocol_(data_source, BindToLoop(message_loop_, base::Bind( | 359 url_protocol_(data_source, BindToLoop(message_loop_, base::Bind( |
| 305 &FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))), | 360 &FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))), |
| 306 need_key_cb_(need_key_cb) { | 361 need_key_cb_(need_key_cb) { |
| 307 DCHECK(message_loop_.get()); | 362 DCHECK(message_loop_.get()); |
| 308 DCHECK(data_source_); | 363 DCHECK(data_source_); |
| 309 } | 364 } |
| 310 | 365 |
| 311 FFmpegDemuxer::~FFmpegDemuxer() {} | 366 FFmpegDemuxer::~FFmpegDemuxer() {} |
| 312 | 367 |
| (...skipping 89 matching lines...) | |
| 402 } | 457 } |
| 403 } | 458 } |
| 404 return NULL; | 459 return NULL; |
| 405 } | 460 } |
| 406 | 461 |
| 407 base::TimeDelta FFmpegDemuxer::GetStartTime() const { | 462 base::TimeDelta FFmpegDemuxer::GetStartTime() const { |
| 408 DCHECK(message_loop_->BelongsToCurrentThread()); | 463 DCHECK(message_loop_->BelongsToCurrentThread()); |
| 409 return start_time_; | 464 return start_time_; |
| 410 } | 465 } |
| 411 | 466 |
| | 467 void FFmpegDemuxer::AddTextStreams() { |
| | 468 DCHECK(message_loop_->BelongsToCurrentThread()); |
| | 469 |
| | 470 for (StreamVector::size_type idx = 0; idx < streams_.size(); ++idx) { |
| | 471 FFmpegDemuxerStream* stream = streams_[idx]; |
| | 472 if (stream == NULL || stream->type() != DemuxerStream::TEXT) |
| | 473 continue; |
| | 474 |
| | 475 TextKind kind = stream->GetTextKind(); |
| | 476 std::string title = stream->GetMetadata("title"); |
| | 477 std::string language = stream->GetMetadata("language"); |
| | 478 |
| | 479 host_->AddTextStream(stream, kind, title, language); |
| | 480 } |
| | 481 } |
| | 482 |
| 412 // Helper for calculating the bitrate of the media based on information stored | 483 // Helper for calculating the bitrate of the media based on information stored |
| 413 // in |format_context| or failing that the size and duration of the media. | 484 // in |format_context| or failing that the size and duration of the media. |
| 414 // | 485 // |
| 415 // Returns 0 if a bitrate could not be determined. | 486 // Returns 0 if a bitrate could not be determined. |
| 416 static int CalculateBitrate( | 487 static int CalculateBitrate( |
| 417 AVFormatContext* format_context, | 488 AVFormatContext* format_context, |
| 418 const base::TimeDelta& duration, | 489 const base::TimeDelta& duration, |
| 419 int64 filesize_in_bytes) { | 490 int64 filesize_in_bytes) { |
| 420 // If there is a bitrate set on the container, use it. | 491 // If there is a bitrate set on the container, use it. |
| 421 if (format_context->bit_rate > 0) | 492 if (format_context->bit_rate > 0) |
| (...skipping 98 matching lines...) | |
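
CalculateBitrate() (old line 416 / new line 487) is documented above as falling back on file size and duration when the container carries no bitrate; its body is elided in this view. The sketch below is an assumption about what such fallback arithmetic looks like, not a copy of the elided code.

```cpp
// Assumed fallback arithmetic: derive bits per second from total file size
// and duration when the container reports no bitrate. Illustration only.
#include <cstdint>

static int BitrateFromSizeAndDuration(int64_t filesize_in_bytes,
                                      double duration_in_seconds) {
  if (filesize_in_bytes <= 0 || duration_in_seconds <= 0.0)
    return 0;  // Matches the documented "returns 0 if undetermined" contract.
  const double bits = static_cast<double>(filesize_in_bytes) * 8.0;
  return static_cast<int>(bits / duration_in_seconds);
}
```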
| 520 // Log the codec detected, whether it is supported or not. | 591 // Log the codec detected, whether it is supported or not. |
| 521 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec", | 592 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec", |
| 522 codec_context->codec_id); | 593 codec_context->codec_id); |
| 523 // Ensure the codec is supported. IsValidConfig() also checks that the | 594 // Ensure the codec is supported. IsValidConfig() also checks that the |
| 524 // frame size and visible size are valid. | 595 // frame size and visible size are valid. |
| 525 AVStreamToVideoDecoderConfig(stream, &video_config, false); | 596 AVStreamToVideoDecoderConfig(stream, &video_config, false); |
| 526 | 597 |
| 527 if (!video_config.IsValidConfig()) | 598 if (!video_config.IsValidConfig()) |
| 528 continue; | 599 continue; |
| 529 video_stream = stream; | 600 video_stream = stream; |
| | 601 } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) { |
| | 602 if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) { |
| | 603 continue; |
| | 604 } |
| 530 } else { | 605 } else { |
| 531 continue; | 606 continue; |
| 532 } | 607 } |
| 533 | 608 |
| 534 streams_[i] = new FFmpegDemuxerStream(this, stream); | 609 streams_[i] = new FFmpegDemuxerStream(this, stream); |
| 535 max_duration = std::max(max_duration, streams_[i]->duration()); | 610 max_duration = std::max(max_duration, streams_[i]->duration()); |
| 536 | 611 |
| 537 if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) { | 612 if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) { |
| 538 const base::TimeDelta first_dts = ConvertFromTimeBase( | 613 const base::TimeDelta first_dts = ConvertFromTimeBase( |
| 539 stream->time_base, stream->first_dts); | 614 stream->time_base, stream->first_dts); |
| 540 if (start_time_ == kNoTimestamp() || first_dts < start_time_) | 615 if (start_time_ == kNoTimestamp() || first_dts < start_time_) |
| 541 start_time_ = first_dts; | 616 start_time_ = first_dts; |
| 542 } | 617 } |
| 543 } | 618 } |
| 544 | 619 |
| 545 if (!audio_stream && !video_stream) { | 620 if (!audio_stream && !video_stream) { |
| 546 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); | 621 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); |
| 547 return; | 622 return; |
| 548 } | 623 } |
| 549 | 624 |
| | 625 if (text_enabled_) |
| | 626 AddTextStreams(); |
| | 627 |
| 550 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { | 628 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { |
| 551 // If there is a duration value in the container use that to find the | 629 // If there is a duration value in the container use that to find the |
| 552 // maximum between it and the duration from A/V streams. | 630 // maximum between it and the duration from A/V streams. |
| 553 const AVRational av_time_base = {1, AV_TIME_BASE}; | 631 const AVRational av_time_base = {1, AV_TIME_BASE}; |
| 554 max_duration = | 632 max_duration = |
| 555 std::max(max_duration, | 633 std::max(max_duration, |
| 556 ConvertFromTimeBase(av_time_base, format_context->duration)); | 634 ConvertFromTimeBase(av_time_base, format_context->duration)); |
| 557 } else { | 635 } else { |
| 558 // The duration is unknown, in which case this is likely a live stream. | 636 // The duration is unknown, in which case this is likely a live stream. |
| 559 max_duration = kInfiniteDuration(); | 637 max_duration = kInfiniteDuration(); |
| (...skipping 274 matching lines...) | |
| 834 } | 912 } |
| 835 for (size_t i = 0; i < buffered.size(); ++i) | 913 for (size_t i = 0; i < buffered.size(); ++i) |
| 836 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i)); | 914 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i)); |
| 837 } | 915 } |
| 838 | 916 |
| 839 void FFmpegDemuxer::OnDataSourceError() { | 917 void FFmpegDemuxer::OnDataSourceError() { |
| 840 host_->OnDemuxerError(PIPELINE_ERROR_READ); | 918 host_->OnDemuxerError(PIPELINE_ERROR_READ); |
| 841 } | 919 } |
| 842 | 920 |
| 843 } // namespace media | 921 } // namespace media |