OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/ffmpeg_video_decoder.h" | 5 #include "media/filters/ffmpeg_video_decoder.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <string> | 8 #include <string> |
9 | 9 |
10 #include "base/bind.h" | 10 #include "base/bind.h" |
11 #include "base/callback_helpers.h" | 11 #include "base/callback_helpers.h" |
12 #include "base/command_line.h" | 12 #include "base/command_line.h" |
13 #include "base/location.h" | 13 #include "base/location.h" |
14 #include "base/message_loop/message_loop_proxy.h" | 14 #include "base/message_loop/message_loop_proxy.h" |
15 #include "base/strings/string_number_conversions.h" | 15 #include "base/strings/string_number_conversions.h" |
16 #include "media/base/bind_to_loop.h" | 16 #include "media/base/bind_to_loop.h" |
17 #include "media/base/decoder_buffer.h" | 17 #include "media/base/decoder_buffer.h" |
18 #include "media/base/demuxer_stream.h" | |
19 #include "media/base/limits.h" | 18 #include "media/base/limits.h" |
20 #include "media/base/media_switches.h" | 19 #include "media/base/media_switches.h" |
21 #include "media/base/pipeline.h" | 20 #include "media/base/pipeline.h" |
22 #include "media/base/video_decoder_config.h" | 21 #include "media/base/video_decoder_config.h" |
23 #include "media/base/video_frame.h" | 22 #include "media/base/video_frame.h" |
24 #include "media/base/video_util.h" | 23 #include "media/base/video_util.h" |
25 #include "media/ffmpeg/ffmpeg_common.h" | 24 #include "media/ffmpeg/ffmpeg_common.h" |
26 #include "media/filters/ffmpeg_glue.h" | 25 #include "media/filters/ffmpeg_glue.h" |
27 | 26 |
28 namespace media { | 27 namespace media { |
(...skipping 25 matching lines...) |
54 decode_threads = std::min(decode_threads, kMaxDecodeThreads); | 53 decode_threads = std::min(decode_threads, kMaxDecodeThreads); |
55 return decode_threads; | 54 return decode_threads; |
56 } | 55 } |
57 | 56 |
58 FFmpegVideoDecoder::FFmpegVideoDecoder( | 57 FFmpegVideoDecoder::FFmpegVideoDecoder( |
59 const scoped_refptr<base::MessageLoopProxy>& message_loop) | 58 const scoped_refptr<base::MessageLoopProxy>& message_loop) |
60 : message_loop_(message_loop), | 59 : message_loop_(message_loop), |
61 weak_factory_(this), | 60 weak_factory_(this), |
62 state_(kUninitialized), | 61 state_(kUninitialized), |
63 codec_context_(NULL), | 62 codec_context_(NULL), |
64 av_frame_(NULL), | 63 av_frame_(NULL) { |
65 demuxer_stream_(NULL) { | |
66 } | 64 } |
67 | 65 |
68 int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context, | 66 int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context, |
69 AVFrame* frame) { | 67 AVFrame* frame) { |
70 // Don't use |codec_context_| here! With threaded decoding, | 68 // Don't use |codec_context_| here! With threaded decoding, |
71 // it will contain unsynchronized width/height/pix_fmt values, | 69 // it will contain unsynchronized width/height/pix_fmt values, |
72 // whereas |codec_context| contains the current thread's | 70 // whereas |codec_context| contains the current thread's |
73 // updated width/height/pix_fmt, which can change for adaptive | 71 // updated width/height/pix_fmt, which can change for adaptive |
74 // content. | 72 // content. |
75 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt); | 73 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt); |
76 if (format == VideoFrame::INVALID) | 74 if (format == VideoFrame::INVALID) |
77 return AVERROR(EINVAL); | 75 return AVERROR(EINVAL); |
78 DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16); | 76 DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16); |
79 | 77 |
80 gfx::Size size(codec_context->width, codec_context->height); | 78 gfx::Size size(codec_context->width, codec_context->height); |
81 int ret; | 79 int ret; |
82 if ((ret = av_image_check_size(size.width(), size.height(), 0, NULL)) < 0) | 80 if ((ret = av_image_check_size(size.width(), size.height(), 0, NULL)) < 0) |
83 return ret; | 81 return ret; |
84 | 82 |
85 gfx::Size natural_size; | 83 gfx::Size natural_size; |
86 if (codec_context->sample_aspect_ratio.num > 0) { | 84 if (codec_context->sample_aspect_ratio.num > 0) { |
87 natural_size = GetNaturalSize(size, | 85 natural_size = GetNaturalSize(size, |
88 codec_context->sample_aspect_ratio.num, | 86 codec_context->sample_aspect_ratio.num, |
89 codec_context->sample_aspect_ratio.den); | 87 codec_context->sample_aspect_ratio.den); |
90 } else { | 88 } else { |
91 natural_size = demuxer_stream_->video_decoder_config().natural_size(); | 89 natural_size = config_.natural_size(); |
92 } | 90 } |
93 | 91 |
94 if (!VideoFrame::IsValidConfig(format, size, gfx::Rect(size), natural_size)) | 92 if (!VideoFrame::IsValidConfig(format, size, gfx::Rect(size), natural_size)) |
95 return AVERROR(EINVAL); | 93 return AVERROR(EINVAL); |
96 | 94 |
97 scoped_refptr<VideoFrame> video_frame = | 95 scoped_refptr<VideoFrame> video_frame = |
98 VideoFrame::CreateFrame(format, size, gfx::Rect(size), natural_size, | 96 VideoFrame::CreateFrame(format, size, gfx::Rect(size), natural_size, |
99 kNoTimestamp()); | 97 kNoTimestamp()); |
100 | 98 |
101 for (int i = 0; i < 3; i++) { | 99 for (int i = 0; i < 3; i++) { |
(...skipping 22 matching lines...) |
124 static void ReleaseVideoBufferImpl(AVCodecContext* s, AVFrame* frame) { | 122 static void ReleaseVideoBufferImpl(AVCodecContext* s, AVFrame* frame) { |
125 scoped_refptr<VideoFrame> video_frame; | 123 scoped_refptr<VideoFrame> video_frame; |
126 video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque)); | 124 video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque)); |
127 | 125 |
128 // The FFmpeg API expects us to zero the data pointers in | 126 // The FFmpeg API expects us to zero the data pointers in |
129 // this callback | 127 // this callback |
130 memset(frame->data, 0, sizeof(frame->data)); | 128 memset(frame->data, 0, sizeof(frame->data)); |
131 frame->opaque = NULL; | 129 frame->opaque = NULL; |
132 } | 130 } |
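
The pair of callbacks above implements FFmpeg's old get_buffer()/release_buffer() contract: the decoder allocates the pixel memory itself, keeps the backing frame alive by parking a reference in AVFrame::opaque while FFmpeg holds the buffer, and takes that reference back (zeroing the data pointers, as the comment notes the API requires) on release. A minimal sketch of the same ownership handoff, using a hypothetical ref-counted Buffer type and deliberately simplified plane setup rather than the elided GetVideoBuffer() body:

    #include <stdint.h>
    #include <string.h>
    #include <vector>
    extern "C" {
    #include <libavcodec/avcodec.h>
    }
    #include "base/memory/ref_counted.h"

    // Hypothetical stand-in for media::VideoFrame.
    struct Buffer : public base::RefCountedThreadSafe<Buffer> {
      explicit Buffer(size_t size) : storage(size) {}
      std::vector<uint8_t> storage;
    };

    static int SketchGetBuffer(AVCodecContext* ctx, AVFrame* frame) {
      // Grossly simplified sizing; real code computes per-plane strides.
      scoped_refptr<Buffer> buf(new Buffer(ctx->width * ctx->height * 3 / 2));
      frame->data[0] = &buf->storage[0];  // linesize/plane setup omitted.
      // Park the reference in |opaque| so the memory outlives this call.
      frame->opaque = NULL;
      buf.swap(reinterpret_cast<Buffer**>(&frame->opaque));
      return 0;
    }

    static void SketchReleaseBuffer(AVCodecContext* ctx, AVFrame* frame) {
      // Reclaim the reference; it is dropped when |buf| goes out of scope.
      scoped_refptr<Buffer> buf;
      buf.swap(reinterpret_cast<Buffer**>(&frame->opaque));
      memset(frame->data, 0, sizeof(frame->data));  // Required by FFmpeg.
      frame->opaque = NULL;
    }
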
133 | 131 |
134 void FFmpegVideoDecoder::Initialize(DemuxerStream* stream, | 132 void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config, |
135 const PipelineStatusCB& status_cb, | 133 const PipelineStatusCB& status_cb, |
136 const StatisticsCB& statistics_cb) { | 134 const StatisticsCB& statistics_cb) { |
137 DCHECK(message_loop_->BelongsToCurrentThread()); | 135 DCHECK(message_loop_->BelongsToCurrentThread()); |
138 DCHECK(stream); | |
139 DCHECK(read_cb_.is_null()); | 136 DCHECK(read_cb_.is_null()); |
140 DCHECK(reset_cb_.is_null()); | 137 DCHECK(reset_cb_.is_null()); |
| 138 DCHECK(!config.is_encrypted()); |
141 | 139 |
142 FFmpegGlue::InitializeFFmpeg(); | 140 FFmpegGlue::InitializeFFmpeg(); |
143 weak_this_ = weak_factory_.GetWeakPtr(); | 141 weak_this_ = weak_factory_.GetWeakPtr(); |
144 | 142 |
145 demuxer_stream_ = stream; | 143 config_ = config; |
146 statistics_cb_ = statistics_cb; | 144 statistics_cb_ = statistics_cb; |
147 PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb); | 145 PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb); |
148 | 146 |
149 if (!ConfigureDecoder()) { | 147 if (!config.IsValidConfig() || !ConfigureDecoder()) { |
150 initialize_cb.Run(DECODER_ERROR_NOT_SUPPORTED); | 148 initialize_cb.Run(DECODER_ERROR_NOT_SUPPORTED); |
151 return; | 149 return; |
152 } | 150 } |
153 | 151 |
154 // Success! | 152 // Success! |
155 state_ = kNormal; | 153 state_ = kNormal; |
156 initialize_cb.Run(PIPELINE_OK); | 154 initialize_cb.Run(PIPELINE_OK); |
157 } | 155 } |
158 | 156 |
159 void FFmpegVideoDecoder::Read(const ReadCB& read_cb) { | 157 void FFmpegVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer, |
| 158 const ReadCB& read_cb) { |
160 DCHECK(message_loop_->BelongsToCurrentThread()); | 159 DCHECK(message_loop_->BelongsToCurrentThread()); |
161 DCHECK(!read_cb.is_null()); | 160 DCHECK(!read_cb.is_null()); |
162 CHECK_NE(state_, kUninitialized); | 161 CHECK_NE(state_, kUninitialized); |
163 CHECK(read_cb_.is_null()) << "Overlapping decodes are not supported."; | 162 CHECK(read_cb_.is_null()) << "Overlapping decodes are not supported."; |
164 read_cb_ = BindToCurrentLoop(read_cb); | 163 read_cb_ = BindToCurrentLoop(read_cb); |
165 | 164 |
166 if (state_ == kError) { | 165 if (state_ == kError) { |
167 base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL); | 166 base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL); |
168 return; | 167 return; |
169 } | 168 } |
170 | 169 |
171 // Return empty frames if decoding has finished. | 170 // Return empty frames if decoding has finished. |
172 if (state_ == kDecodeFinished) { | 171 if (state_ == kDecodeFinished) { |
173 base::ResetAndReturn(&read_cb_).Run(kOk, VideoFrame::CreateEmptyFrame()); | 172 base::ResetAndReturn(&read_cb_).Run(kOk, VideoFrame::CreateEmptyFrame()); |
174 return; | 173 return; |
175 } | 174 } |
176 | 175 |
177 ReadFromDemuxerStream(); | 176 DecodeBuffer(buffer); |
178 } | 177 } |
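
The two entry points above capture the interface change in this patch: Initialize() now receives a VideoDecoderConfig directly instead of a DemuxerStream, and Decode() is handed each DecoderBuffer by the caller rather than pulling it from the demuxer itself. A caller-side sketch of the new flow (the callback functions here are hypothetical, not part of the patch):

    #include "base/bind.h"
    #include "media/base/decoder_buffer.h"
    #include "media/base/pipeline.h"
    #include "media/base/video_decoder_config.h"
    #include "media/base/video_frame.h"
    #include "media/filters/ffmpeg_video_decoder.h"

    // Hypothetical callbacks, for illustration only.
    static void OnInitDone(media::PipelineStatus status) {
      // On PIPELINE_OK, start feeding DecoderBuffers via Decode().
    }
    static void OnStatistics(const media::PipelineStatistics& stats) {}
    static void OnFrameReady(media::VideoDecoder::Status status,
                             const scoped_refptr<media::VideoFrame>& frame) {
      // Issue the next Decode() from here; overlapping decodes are not
      // supported. After the last encoded buffer, send an end-of-stream
      // buffer so queued frames are flushed, and stop once an empty frame
      // comes back.
    }

    static void StartDecoder(media::FFmpegVideoDecoder* decoder,
                             const media::VideoDecoderConfig& config) {
      decoder->Initialize(config,
                          base::Bind(&OnInitDone),     // PipelineStatusCB
                          base::Bind(&OnStatistics));  // StatisticsCB
    }

This mirrors the removal of ReadFromDemuxerStream()/BufferReady() further down: per the comment in the deleted BufferReady(), buffer fetching and config-change handling appear to move up into VideoFrameStream rather than living in each decoder.
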
179 | 178 |
180 void FFmpegVideoDecoder::Reset(const base::Closure& closure) { | 179 void FFmpegVideoDecoder::Reset(const base::Closure& closure) { |
181 DCHECK(message_loop_->BelongsToCurrentThread()); | 180 DCHECK(message_loop_->BelongsToCurrentThread()); |
182 DCHECK(reset_cb_.is_null()); | 181 DCHECK(reset_cb_.is_null()); |
183 reset_cb_ = BindToCurrentLoop(closure); | 182 reset_cb_ = BindToCurrentLoop(closure); |
184 | 183 |
185 // Defer the reset if a read is pending. | 184 // Defer the reset if a read is pending. |
186 if (!read_cb_.is_null()) | 185 if (!read_cb_.is_null()) |
187 return; | 186 return; |
(...skipping 26 matching lines...) |
214 ReleaseFFmpegResources(); | 213 ReleaseFFmpegResources(); |
215 state_ = kUninitialized; | 214 state_ = kUninitialized; |
216 } | 215 } |
217 | 216 |
218 FFmpegVideoDecoder::~FFmpegVideoDecoder() { | 217 FFmpegVideoDecoder::~FFmpegVideoDecoder() { |
219 DCHECK_EQ(kUninitialized, state_); | 218 DCHECK_EQ(kUninitialized, state_); |
220 DCHECK(!codec_context_); | 219 DCHECK(!codec_context_); |
221 DCHECK(!av_frame_); | 220 DCHECK(!av_frame_); |
222 } | 221 } |
223 | 222 |
224 void FFmpegVideoDecoder::ReadFromDemuxerStream() { | |
225 DCHECK_NE(state_, kUninitialized); | |
226 DCHECK_NE(state_, kDecodeFinished); | |
227 DCHECK_NE(state_, kError); | |
228 DCHECK(!read_cb_.is_null()); | |
229 | |
230 demuxer_stream_->Read(base::Bind( | |
231 &FFmpegVideoDecoder::BufferReady, weak_this_)); | |
232 } | |
233 | |
234 void FFmpegVideoDecoder::BufferReady( | |
235 DemuxerStream::Status status, | |
236 const scoped_refptr<DecoderBuffer>& buffer) { | |
237 DCHECK(message_loop_->BelongsToCurrentThread()); | |
238 DCHECK_NE(state_, kDecodeFinished); | |
239 DCHECK_NE(state_, kError); | |
240 DCHECK_EQ(status != DemuxerStream::kOk, !buffer.get()) << status; | |
241 | |
242 if (state_ == kUninitialized) | |
243 return; | |
244 | |
245 DCHECK(!read_cb_.is_null()); | |
246 | |
247 if (!reset_cb_.is_null()) { | |
248 base::ResetAndReturn(&read_cb_).Run(kOk, NULL); | |
249 DoReset(); | |
250 return; | |
251 } | |
252 | |
253 if (status == DemuxerStream::kAborted) { | |
254 base::ResetAndReturn(&read_cb_).Run(kOk, NULL); | |
255 return; | |
256 } | |
257 | |
258 // VideoFrameStream ensures no kConfigChanged is passed to VideoDecoders. | |
259 DCHECK_EQ(status, DemuxerStream::kOk) << status; | |
260 DecodeBuffer(buffer); | |
261 } | |
262 | |
263 void FFmpegVideoDecoder::DecodeBuffer( | 223 void FFmpegVideoDecoder::DecodeBuffer( |
264 const scoped_refptr<DecoderBuffer>& buffer) { | 224 const scoped_refptr<DecoderBuffer>& buffer) { |
265 DCHECK(message_loop_->BelongsToCurrentThread()); | 225 DCHECK(message_loop_->BelongsToCurrentThread()); |
266 DCHECK_NE(state_, kUninitialized); | 226 DCHECK_NE(state_, kUninitialized); |
267 DCHECK_NE(state_, kDecodeFinished); | 227 DCHECK_NE(state_, kDecodeFinished); |
268 DCHECK_NE(state_, kError); | 228 DCHECK_NE(state_, kError); |
269 DCHECK(reset_cb_.is_null()); | 229 DCHECK(reset_cb_.is_null()); |
270 DCHECK(!read_cb_.is_null()); | 230 DCHECK(!read_cb_.is_null()); |
271 DCHECK(buffer.get()); | 231 DCHECK(buffer); |
272 | 232 |
273 // During decode, because reads are issued asynchronously, it is possible to | 233 // During decode, because reads are issued asynchronously, it is possible to |
274 // receive multiple end of stream buffers since each read is acked. When the | 234 // receive multiple end of stream buffers since each read is acked. When the |
275 // first end of stream buffer is read, FFmpeg may still have frames queued | 235 // first end of stream buffer is read, FFmpeg may still have frames queued |
276 // up in the decoder so we need to go through the decode loop until it stops | 236 // up in the decoder so we need to go through the decode loop until it stops |
277 // giving sensible data. After that, the decoder should output empty | 237 // giving sensible data. After that, the decoder should output empty |
278 // frames. There are three states the decoder can be in: | 238 // frames. There are three states the decoder can be in: |
279 // | 239 // |
280 // kNormal: This is the starting state. Buffers are decoded. Decode errors | 240 // kNormal: This is the starting state. Buffers are decoded. Decode errors |
281 // are discarded. | 241 // are discarded. |
(...skipping 15 matching lines...) |
297 // When avcodec_decode_video2() errors out. | 257 // When avcodec_decode_video2() errors out. |
298 // (any state) -> kNormal: | 258 // (any state) -> kNormal: |
299 // Any time Reset() is called. | 259 // Any time Reset() is called. |
300 | 260 |
301 // Transition to kFlushCodec on the first end of stream buffer. | 261 // Transition to kFlushCodec on the first end of stream buffer. |
302 if (state_ == kNormal && buffer->IsEndOfStream()) { | 262 if (state_ == kNormal && buffer->IsEndOfStream()) { |
303 state_ = kFlushCodec; | 263 state_ = kFlushCodec; |
304 } | 264 } |
305 | 265 |
306 scoped_refptr<VideoFrame> video_frame; | 266 scoped_refptr<VideoFrame> video_frame; |
307 if (!Decode(buffer, &video_frame)) { | 267 if (!FFmpegDecode(buffer, &video_frame)) { |
308 state_ = kError; | 268 state_ = kError; |
309 base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL); | 269 base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL); |
310 return; | 270 return; |
311 } | 271 } |
312 | 272 |
313 // Any successful decode counts! | 273 // Any successful decode counts! |
314 if (!buffer->IsEndOfStream() && buffer->GetDataSize() > 0) { | 274 if (!buffer->IsEndOfStream() && buffer->GetDataSize() > 0) { |
315 PipelineStatistics statistics; | 275 PipelineStatistics statistics; |
316 statistics.video_bytes_decoded = buffer->GetDataSize(); | 276 statistics.video_bytes_decoded = buffer->GetDataSize(); |
317 statistics_cb_.Run(statistics); | 277 statistics_cb_.Run(statistics); |
318 } | 278 } |
319 | 279 |
320 if (!video_frame.get()) { | 280 if (!video_frame.get()) { |
321 if (state_ == kFlushCodec) { | 281 if (state_ == kFlushCodec) { |
322 DCHECK(buffer->IsEndOfStream()); | 282 DCHECK(buffer->IsEndOfStream()); |
323 state_ = kDecodeFinished; | 283 state_ = kDecodeFinished; |
324 base::ResetAndReturn(&read_cb_).Run(kOk, VideoFrame::CreateEmptyFrame()); | 284 base::ResetAndReturn(&read_cb_).Run(kOk, VideoFrame::CreateEmptyFrame()); |
325 return; | 285 return; |
326 } | 286 } |
327 | 287 |
328 ReadFromDemuxerStream(); | 288 base::ResetAndReturn(&read_cb_).Run(kNotEnoughData, NULL); |
329 return; | 289 return; |
330 } | 290 } |
331 | 291 |
332 base::ResetAndReturn(&read_cb_).Run(kOk, video_frame); | 292 base::ResetAndReturn(&read_cb_).Run(kOk, video_frame); |
333 } | 293 } |
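
The state comments in DecodeBuffer() are partly elided above; as a compressed restatement (not new behavior), the transitions the function enforces look roughly like this:

    // Mirrors the DecodeBuffer() comment; Reset() (handled elsewhere)
    // returns any state to kNormal.
    enum SketchState { kNormal, kFlushCodec, kDecodeFinished, kError };

    static SketchState NextState(SketchState state,
                                 bool end_of_stream_buffer,
                                 bool decode_failed,
                                 bool produced_frame) {
      if (decode_failed)
        return kError;            // avcodec_decode_video2() errored out.
      if (state == kNormal && end_of_stream_buffer)
        state = kFlushCodec;      // Start draining frames queued in FFmpeg.
      if (state == kFlushCodec && !produced_frame)
        return kDecodeFinished;   // Codec fully flushed; emit empty frames.
      return state;
    }
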
334 | 294 |
335 bool FFmpegVideoDecoder::Decode( | 295 bool FFmpegVideoDecoder::FFmpegDecode( |
336 const scoped_refptr<DecoderBuffer>& buffer, | 296 const scoped_refptr<DecoderBuffer>& buffer, |
337 scoped_refptr<VideoFrame>* video_frame) { | 297 scoped_refptr<VideoFrame>* video_frame) { |
338 DCHECK(video_frame); | 298 DCHECK(video_frame); |
339 | 299 |
340 // Reset frame to default values. | 300 // Reset frame to default values. |
341 avcodec_get_frame_defaults(av_frame_); | 301 avcodec_get_frame_defaults(av_frame_); |
342 | 302 |
343 // Create a packet for input data. | 303 // Create a packet for input data. |
344 // Due to FFmpeg API changes we no longer have const read-only pointers. | 304 // Due to FFmpeg API changes we no longer have const read-only pointers. |
345 AVPacket packet; | 305 AVPacket packet; |
(...skipping 64 matching lines...) |
410 av_free(codec_context_); | 370 av_free(codec_context_); |
411 codec_context_ = NULL; | 371 codec_context_ = NULL; |
412 } | 372 } |
413 if (av_frame_) { | 373 if (av_frame_) { |
414 av_free(av_frame_); | 374 av_free(av_frame_); |
415 av_frame_ = NULL; | 375 av_frame_ = NULL; |
416 } | 376 } |
417 } | 377 } |
418 | 378 |
419 bool FFmpegVideoDecoder::ConfigureDecoder() { | 379 bool FFmpegVideoDecoder::ConfigureDecoder() { |
420 const VideoDecoderConfig& config = demuxer_stream_->video_decoder_config(); | |
421 | |
422 if (!config.IsValidConfig()) { | |
423 DLOG(ERROR) << "Invalid video stream - " << config.AsHumanReadableString(); | |
424 return false; | |
425 } | |
426 | |
427 if (config.is_encrypted()) { | |
428 DLOG(ERROR) << "Encrypted video stream not supported."; | |
429 return false; | |
430 } | |
431 | |
432 // Release existing decoder resources if necessary. | 380 // Release existing decoder resources if necessary. |
433 ReleaseFFmpegResources(); | 381 ReleaseFFmpegResources(); |
434 | 382 |
435 // Initialize AVCodecContext structure. | 383 // Initialize AVCodecContext structure. |
436 codec_context_ = avcodec_alloc_context3(NULL); | 384 codec_context_ = avcodec_alloc_context3(NULL); |
437 VideoDecoderConfigToAVCodecContext(config, codec_context_); | 385 VideoDecoderConfigToAVCodecContext(config_, codec_context_); |
438 | 386 |
439 // Enable motion vector search (potentially slow), strong deblocking filter | 387 // Enable motion vector search (potentially slow), strong deblocking filter |
440 // for damaged macroblocks, and set our error detection sensitivity. | 388 // for damaged macroblocks, and set our error detection sensitivity. |
441 codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK; | 389 codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK; |
442 codec_context_->thread_count = GetThreadCount(codec_context_->codec_id); | 390 codec_context_->thread_count = GetThreadCount(codec_context_->codec_id); |
443 codec_context_->opaque = this; | 391 codec_context_->opaque = this; |
444 codec_context_->flags |= CODEC_FLAG_EMU_EDGE; | 392 codec_context_->flags |= CODEC_FLAG_EMU_EDGE; |
445 codec_context_->get_buffer = GetVideoBufferImpl; | 393 codec_context_->get_buffer = GetVideoBufferImpl; |
446 codec_context_->release_buffer = ReleaseVideoBufferImpl; | 394 codec_context_->release_buffer = ReleaseVideoBufferImpl; |
447 | 395 |
448 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); | 396 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); |
449 if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) { | 397 if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) { |
450 ReleaseFFmpegResources(); | 398 ReleaseFFmpegResources(); |
451 return false; | 399 return false; |
452 } | 400 } |
453 | 401 |
454 av_frame_ = avcodec_alloc_frame(); | 402 av_frame_ = avcodec_alloc_frame(); |
455 return true; | 403 return true; |
456 } | 404 } |
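
Stripped of the Chromium plumbing, ConfigureDecoder() above is the standard old-style FFmpeg bring-up. A rough standalone equivalent, assuming the caller has already looked up the AVCodec (illustrative, not the patched code):

    extern "C" {
    #include <libavcodec/avcodec.h>
    }

    // Illustrative equivalent of ConfigureDecoder(); error handling trimmed.
    static AVCodecContext* OpenVideoDecoder(AVCodec* codec, int thread_count) {
      AVCodecContext* ctx = avcodec_alloc_context3(codec);
      if (!ctx)
        return NULL;
      // Same knobs the patch sets: error concealment, threading, edge
      // emulation; the custom get_buffer/release_buffer hooks are omitted.
      ctx->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
      ctx->thread_count = thread_count;
      ctx->flags |= CODEC_FLAG_EMU_EDGE;
      if (avcodec_open2(ctx, codec, NULL) < 0) {
        av_free(ctx);
        return NULL;
      }
      return ctx;
    }
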
457 | 405 |
458 } // namespace media | 406 } // namespace media |