| Index: media/filters/ffmpeg_demuxer.cc | 
| diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc | 
| index 723eb5f28d9caac5b9f4226ca11d7bd745d9205c..9620a2d7cf846b81b26dca0097bf2e1380f5b3c1 100644 | 
| --- a/media/filters/ffmpeg_demuxer.cc | 
| +++ b/media/filters/ffmpeg_demuxer.cc | 
| @@ -11,11 +11,10 @@ | 
| #include "base/bind.h" | 
| #include "base/callback.h" | 
| #include "base/callback_helpers.h" | 
| -#include "base/command_line.h" | 
| #include "base/memory/scoped_ptr.h" | 
| #include "base/message_loop/message_loop.h" | 
| #include "base/metrics/sparse_histogram.h" | 
| -#include "base/stl_util.h" | 
| +//#include "base/stl_util.h" | 
 
acolwell GONE FROM CHROMIUM
2013/10/14 20:42:24
nit: remove
 
Matthew Heaney (Chromium)
2013/10/17 05:46:44
Done.
 
| #include "base/strings/string_util.h" | 
| #include "base/strings/stringprintf.h" | 
| #include "base/task_runner_util.h" | 
| @@ -26,7 +25,6 @@ | 
| #include "media/base/decrypt_config.h" | 
| #include "media/base/limits.h" | 
| #include "media/base/media_log.h" | 
| -#include "media/base/media_switches.h" | 
| #include "media/base/video_decoder_config.h" | 
| #include "media/ffmpeg/ffmpeg_common.h" | 
| #include "media/filters/ffmpeg_glue.h" | 
| @@ -64,6 +62,9 @@ FFmpegDemuxerStream::FFmpegDemuxerStream( | 
| AVStreamToVideoDecoderConfig(stream, &video_config_, true); | 
| is_encrypted = video_config_.is_encrypted(); | 
| break; | 
| + case AVMEDIA_TYPE_SUBTITLE: | 
| + type_ = TEXT; | 
| + break; | 
| default: | 
| NOTREACHED(); | 
| break; | 
| @@ -110,31 +111,62 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) { | 
| LOG(ERROR) << "Format conversion failed."; | 
| } | 
| - // Get side data if any. For now, the only type of side_data is VP8 Alpha. We | 
| - // keep this generic so that other side_data types in the future can be | 
| - // handled the same way as well. | 
| - av_packet_split_side_data(packet.get()); | 
| - int side_data_size = 0; | 
| - uint8* side_data = av_packet_get_side_data( | 
| - packet.get(), | 
| - AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, | 
| - &side_data_size); | 
| - | 
| - // If a packet is returned by FFmpeg's av_parser_parse2() the packet will | 
| - // reference inner memory of FFmpeg. As such we should transfer the packet | 
| - // into memory we control. | 
| scoped_refptr<DecoderBuffer> buffer; | 
| - if (side_data_size > 0) { | 
| + | 
| + // Get side data if any. For now, the only types of side_data are VP8 Alpha, | 
| + // and WebVTT id and settings. We keep this generic so that other side_data | 
| + // types in the future can be handled the same way as well. | 
| + av_packet_split_side_data(packet.get()); | 
| + if (type() == DemuxerStream::TEXT) { | 
| + int id_size = 0; | 
| + uint8* id_data = av_packet_get_side_data( | 
| + packet.get(), | 
| + AV_PKT_DATA_WEBVTT_IDENTIFIER, | 
| + &id_size); | 
| + | 
| + int settings_size = 0; | 
| + uint8* settings_data = av_packet_get_side_data( | 
| + packet.get(), | 
| + AV_PKT_DATA_WEBVTT_SETTINGS, | 
| + &settings_size); | 
| + | 
| + // The DecoderBuffer only supports a single side data item. In the case of | 
| + // a WebVTT cue, we can have potentially two side data items. In order to | 
| + // avoid disrupting DecoderBuffer any more than we need to, we copy both | 
| + // side data items onto a single one, and terminate each with a NUL marker. | 
| + std::vector<uint8> side_data; | 
| + side_data.reserve(id_size + 1 + settings_size + 1); | 
| + side_data.insert(side_data.end(), | 
| + id_data, id_data + id_size); | 
| + side_data.push_back(0); | 
| + side_data.insert(side_data.end(), | 
| + settings_data, settings_data + settings_size); | 
| + side_data.push_back(0); | 
| + | 
| buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size, | 
| - side_data, side_data_size); | 
| + side_data.data(), side_data.size()); | 
| } else { | 
| - buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size); | 
| + int side_data_size = 0; | 
| + uint8* side_data = av_packet_get_side_data( | 
| + packet.get(), | 
| + AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, | 
| + &side_data_size); | 
| + | 
| + // If a packet is returned by FFmpeg's av_parser_parse2() the packet will | 
| + // reference inner memory of FFmpeg. As such we should transfer the packet | 
| + // into memory we control. | 
| + if (side_data_size > 0) { | 
| + buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size, | 
| + side_data, side_data_size); | 
| + } else { | 
| + buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size); | 
| + } | 
| } | 
| if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) || | 
| (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) { | 
| scoped_ptr<DecryptConfig> config(WebMCreateDecryptConfig( | 
| - packet->data, packet->size, | 
| + packet->data, packet->size, | 
| reinterpret_cast<const uint8*>(encryption_key_id_.data()), | 
| encryption_key_id_.size())); | 
| if (!config) | 
| @@ -272,6 +304,27 @@ bool FFmpegDemuxerStream::HasAvailableCapacity() { | 
| return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity; | 
| } | 
| +TextKind FFmpegDemuxerStream::GetTextKind() const { | 
| + DCHECK_EQ(type_, DemuxerStream::TEXT); | 
| + | 
| + if (stream_->disposition & AV_DISPOSITION_CAPTIONS) | 
| + return kTextCaptions; | 
| + | 
| + if (stream_->disposition & AV_DISPOSITION_DESCRIPTIONS) | 
| + return kTextDescriptions; | 
| + | 
| + if (stream_->disposition & AV_DISPOSITION_METADATA) | 
| + return kTextMetadata; | 
| + | 
| + return kTextSubtitles; | 
| +} | 
| + | 
| +std::string FFmpegDemuxerStream::GetMetadata(const char* key) const { | 
| + const AVDictionaryEntry* entry = | 
| + av_dict_get(stream_->metadata, key, NULL, 0); | 
| + return (entry == NULL || entry->value == NULL) ? "" : entry->value; | 
| +} | 
| + | 
| // static | 
| base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp( | 
| const AVRational& time_base, int64 timestamp) { | 
| @@ -288,6 +341,7 @@ FFmpegDemuxer::FFmpegDemuxer( | 
| const scoped_refptr<base::MessageLoopProxy>& message_loop, | 
| DataSource* data_source, | 
| const NeedKeyCB& need_key_cb, | 
| + bool text_enabled, | 
| const scoped_refptr<MediaLog>& media_log) | 
| : host_(NULL), | 
| message_loop_(message_loop), | 
| @@ -300,6 +354,7 @@ FFmpegDemuxer::FFmpegDemuxer( | 
| bitrate_(0), | 
| start_time_(kNoTimestamp()), | 
| audio_disabled_(false), | 
| + text_enabled_(text_enabled), | 
| duration_known_(false), | 
| url_protocol_(data_source, BindToLoop(message_loop_, base::Bind( | 
| &FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))), | 
| @@ -409,6 +464,22 @@ base::TimeDelta FFmpegDemuxer::GetStartTime() const { | 
| return start_time_; | 
| } | 
| +void FFmpegDemuxer::AddTextStreams() { | 
| + DCHECK(message_loop_->BelongsToCurrentThread()); | 
| + | 
| + for (StreamVector::size_type idx = 0; idx < streams_.size(); ++idx) { | 
| + FFmpegDemuxerStream* stream = streams_[idx]; | 
| + if (stream == NULL || stream->type() != DemuxerStream::TEXT) | 
| + continue; | 
| + | 
| + TextKind kind = stream->GetTextKind(); | 
| + std::string title = stream->GetMetadata("title"); | 
| + std::string language = stream->GetMetadata("language"); | 
| + | 
| + host_->AddTextStream(stream, kind, title, language); | 
| + } | 
| +} | 
| + | 
| // Helper for calculating the bitrate of the media based on information stored | 
| // in |format_context| or failing that the size and duration of the media. | 
| // | 
| @@ -527,6 +598,10 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb, | 
| if (!video_config.IsValidConfig()) | 
| continue; | 
| video_stream = stream; | 
| + } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) { | 
| + if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) { | 
| + continue; | 
| + } | 
| } else { | 
| continue; | 
| } | 
| @@ -547,6 +622,9 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb, | 
| return; | 
| } | 
| + if (text_enabled_) | 
| + AddTextStreams(); | 
| + | 
| if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { | 
| // If there is a duration value in the container use that to find the | 
| // maximum between it and the duration from A/V streams. |