OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/ffmpeg_video_decoder.h" | 5 #include "media/filters/ffmpeg_video_decoder.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/callback_helpers.h" | 8 #include "base/callback_helpers.h" |
9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
10 #include "base/message_loop.h" | 10 #include "base/message_loop.h" |
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
54 : message_loop_factory_cb_(message_loop_cb), | 54 : message_loop_factory_cb_(message_loop_cb), |
55 message_loop_(NULL), | 55 message_loop_(NULL), |
56 state_(kUninitialized), | 56 state_(kUninitialized), |
57 codec_context_(NULL), | 57 codec_context_(NULL), |
58 av_frame_(NULL), | 58 av_frame_(NULL), |
59 frame_rate_numerator_(0), | 59 frame_rate_numerator_(0), |
60 frame_rate_denominator_(0), | 60 frame_rate_denominator_(0), |
61 decryptor_(NULL) { | 61 decryptor_(NULL) { |
62 } | 62 } |
63 | 63 |
64 int FFmpegVideoDecoder::GetVideoBuffer(AVFrame *frame) { | |
scherkus (not reviewing)
2012/06/14 02:21:33
pointer w/ type
| |
65 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context_->pix_fmt); | |
66 if (format == VideoFrame::INVALID) | |
67 return AVERROR(EINVAL); | |
68 DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16); | |
69 | |
70 int w = codec_context_->width, h = codec_context_->height, ret; | |
scherkus (not reviewing)
2012/06/14 02:21:33
split each initializer onto each line
s/w/width/
| |
71 if ((ret = av_image_check_size(w, h, 0, NULL)) < 0) | |
72 return ret; | |
73 | |
74 scoped_refptr<VideoFrame> buf = VideoFrame::CreateFrame(format, | |
scherkus (not reviewing)
2012/06/14 02:21:33
drop this to next line + 4-space indent
scherkus (not reviewing)
2012/06/14 02:21:33
nit: s/buf/video_frame/
rest of this file uses sa
| |
75 w, h, 16u, kNoTimestamp(), kNoTimestamp()); | |
76 | |
77 for (int i = 0; i < 3; i++) { | |
78 frame->base[i] = buf->data(i); | |
79 frame->data[i] = buf->data(i); | |
80 frame->linesize[i] = buf->stride(i); | |
81 } | |
82 | |
83 frame->opaque = buf.release(); | |
84 frame->type = FF_BUFFER_TYPE_USER; | |
 85 frame->pkt_pts = codec_context_->pkt ? codec_context_->pkt->pts : AV_NOPTS_VALUE; | |
scherkus (not reviewing)
2012/06/14 02:21:33
fix >80 chars
| |
86 frame->width = codec_context_->width; | |
87 frame->height = codec_context_->height; | |
88 frame->format = codec_context_->pix_fmt; | |
89 | |
90 return 0; | |
91 } | |
92 | |
93 static int callbackGetVideoBuffer(AVCodecContext *s, AVFrame *frame) { | |
scherkus (not reviewing)
2012/06/14 02:21:33
pointer w/ type here + below
scherkus (not reviewing)
2012/06/14 02:21:33
we don't do camelCase code around here but AllCaps
| |
94 FFmpegVideoDecoder *vd = static_cast<FFmpegVideoDecoder *>(s->opaque); | |
95 return vd->GetVideoBuffer(frame); | |
96 } | |
97 | |
98 static void callbackReleaseVideoBuffer(AVCodecContext *s, AVFrame *frame) { | |
scherkus (not reviewing)
2012/06/14 02:21:33
pointer w/ type here + below
| |
 99 // We're releasing the reference to the buffer allocated in | |
100 // GetVideoBuffer() here, so the explicit Release() here is | |
101 // intentional. Would be nice if scoped_refptr::adopt existed. | |
scherkus (not reviewing)
2012/06/14 02:21:33
I'd drop the "Would be nice.." part -- it ain't go
| |
102 scoped_refptr<VideoFrame> buf = static_cast<VideoFrame *>(frame->opaque); | |
scherkus (not reviewing)
2012/06/14 02:21:33
s/buf/video_frame
| |
103 buf->Release(); | |
104 memset(frame->data, 0, sizeof(frame->data)); | |
105 frame->opaque = NULL; | |
106 } | |
107 | |
64 void FFmpegVideoDecoder::Initialize(const scoped_refptr<DemuxerStream>& stream, | 108 void FFmpegVideoDecoder::Initialize(const scoped_refptr<DemuxerStream>& stream, |
65 const PipelineStatusCB& status_cb, | 109 const PipelineStatusCB& status_cb, |
66 const StatisticsCB& statistics_cb) { | 110 const StatisticsCB& statistics_cb) { |
67 if (!message_loop_) { | 111 if (!message_loop_) { |
68 message_loop_ = message_loop_factory_cb_.Run(); | 112 message_loop_ = message_loop_factory_cb_.Run(); |
69 message_loop_factory_cb_.Reset(); | 113 message_loop_factory_cb_.Reset(); |
70 | 114 |
71 message_loop_->PostTask(FROM_HERE, base::Bind( | 115 message_loop_->PostTask(FROM_HERE, base::Bind( |
72 &FFmpegVideoDecoder::Initialize, this, | 116 &FFmpegVideoDecoder::Initialize, this, |
73 stream, status_cb, statistics_cb)); | 117 stream, status_cb, statistics_cb)); |
(...skipping 23 matching lines...) Expand all Loading... | |
97 | 141 |
98 // Initialize AVCodecContext structure. | 142 // Initialize AVCodecContext structure. |
99 codec_context_ = avcodec_alloc_context(); | 143 codec_context_ = avcodec_alloc_context(); |
100 VideoDecoderConfigToAVCodecContext(config, codec_context_); | 144 VideoDecoderConfigToAVCodecContext(config, codec_context_); |
101 | 145 |
102 // Enable motion vector search (potentially slow), strong deblocking filter | 146 // Enable motion vector search (potentially slow), strong deblocking filter |
103 // for damaged macroblocks, and set our error detection sensitivity. | 147 // for damaged macroblocks, and set our error detection sensitivity. |
104 codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK; | 148 codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK; |
105 codec_context_->err_recognition = AV_EF_CAREFUL; | 149 codec_context_->err_recognition = AV_EF_CAREFUL; |
106 codec_context_->thread_count = GetThreadCount(codec_context_->codec_id); | 150 codec_context_->thread_count = GetThreadCount(codec_context_->codec_id); |
151 codec_context_->opaque = this; | |
152 codec_context_->flags |= CODEC_FLAG_EMU_EDGE; | |
153 codec_context_->get_buffer = callbackGetVideoBuffer; | |
154 codec_context_->release_buffer = callbackReleaseVideoBuffer; | |
107 | 155 |
108 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); | 156 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); |
109 if (!codec) { | 157 if (!codec) { |
110 status_cb.Run(PIPELINE_ERROR_DECODE); | 158 status_cb.Run(PIPELINE_ERROR_DECODE); |
111 return; | 159 return; |
112 } | 160 } |
113 | 161 |
114 if (avcodec_open2(codec_context_, codec, NULL) < 0) { | 162 if (avcodec_open2(codec_context_, codec, NULL) < 0) { |
115 status_cb.Run(PIPELINE_ERROR_DECODE); | 163 status_cb.Run(PIPELINE_ERROR_DECODE); |
116 return; | 164 return; |
(...skipping 238 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
355 // The decoder is in a bad state and not decoding correctly. | 403 // The decoder is in a bad state and not decoding correctly. |
356 // Checking for NULL avoids a crash in CopyPlane(). | 404 // Checking for NULL avoids a crash in CopyPlane(). |
357 if (!av_frame_->data[VideoFrame::kYPlane] || | 405 if (!av_frame_->data[VideoFrame::kYPlane] || |
358 !av_frame_->data[VideoFrame::kUPlane] || | 406 !av_frame_->data[VideoFrame::kUPlane] || |
359 !av_frame_->data[VideoFrame::kVPlane]) { | 407 !av_frame_->data[VideoFrame::kVPlane]) { |
360 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; | 408 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; |
361 *video_frame = NULL; | 409 *video_frame = NULL; |
362 return false; | 410 return false; |
363 } | 411 } |
364 | 412 |
365 // We've got a frame! Make sure we have a place to store it. | 413 if (!av_frame_->opaque) { |
366 *video_frame = AllocateVideoFrame(); | 414 LOG(ERROR) << "VideoFrame object associated with frame data not set."; |
367 if (!(*video_frame)) { | |
368 LOG(ERROR) << "Failed to allocate video frame"; | |
369 return false; | 415 return false; |
370 } | 416 } |
417 *video_frame = static_cast<VideoFrame *>(av_frame_->opaque); | |
371 | 418 |
372 // Determine timestamp and calculate the duration based on the repeat picture | 419 // Determine timestamp and calculate the duration based on the repeat picture |
373 // count. According to FFmpeg docs, the total duration can be calculated as | 420 // count. According to FFmpeg docs, the total duration can be calculated as |
374 // follows: | 421 // follows: |
375 // fps = 1 / time_base | 422 // fps = 1 / time_base |
376 // | 423 // |
377 // duration = (1 / fps) + (repeat_pict) / (2 * fps) | 424 // duration = (1 / fps) + (repeat_pict) / (2 * fps) |
378 // = (2 + repeat_pict) / (2 * fps) | 425 // = (2 + repeat_pict) / (2 * fps) |
379 // = (2 + repeat_pict) / (2 * (1 / time_base)) | 426 // = (2 + repeat_pict) / (2 * (1 / time_base)) |
380 DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check. | 427 DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check. |
381 AVRational doubled_time_base; | 428 AVRational doubled_time_base; |
382 doubled_time_base.num = frame_rate_denominator_; | 429 doubled_time_base.num = frame_rate_denominator_; |
383 doubled_time_base.den = frame_rate_numerator_ * 2; | 430 doubled_time_base.den = frame_rate_numerator_ * 2; |
384 | 431 |
385 (*video_frame)->SetTimestamp( | 432 (*video_frame)->SetTimestamp( |
386 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); | 433 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); |
387 (*video_frame)->SetDuration( | 434 (*video_frame)->SetDuration( |
388 ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict)); | 435 ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict)); |
389 | 436 |
390 // Copy the frame data since FFmpeg reuses internal buffers for AVFrame | |
391 // output, meaning the data is only valid until the next | |
392 // avcodec_decode_video() call. | |
393 int y_rows = codec_context_->height; | |
394 int uv_rows = codec_context_->height; | |
395 if (codec_context_->pix_fmt == PIX_FMT_YUV420P) { | |
396 uv_rows /= 2; | |
397 } | |
398 | |
399 CopyYPlane(av_frame_->data[0], av_frame_->linesize[0], y_rows, *video_frame); | |
400 CopyUPlane(av_frame_->data[1], av_frame_->linesize[1], uv_rows, *video_frame); | |
401 CopyVPlane(av_frame_->data[2], av_frame_->linesize[2], uv_rows, *video_frame); | |
402 | |
403 return true; | 437 return true; |
404 } | 438 } |
405 | 439 |
406 void FFmpegVideoDecoder::DeliverFrame( | 440 void FFmpegVideoDecoder::DeliverFrame( |
407 const scoped_refptr<VideoFrame>& video_frame) { | 441 const scoped_refptr<VideoFrame>& video_frame) { |
408 // Reset the callback before running to protect against reentrancy. | 442 // Reset the callback before running to protect against reentrancy. |
409 base::ResetAndReturn(&read_cb_).Run(kOk, video_frame); | 443 base::ResetAndReturn(&read_cb_).Run(kOk, video_frame); |
410 } | 444 } |
411 | 445 |
412 void FFmpegVideoDecoder::ReleaseFFmpegResources() { | 446 void FFmpegVideoDecoder::ReleaseFFmpegResources() { |
413 if (codec_context_) { | 447 if (codec_context_) { |
414 av_free(codec_context_->extradata); | 448 av_free(codec_context_->extradata); |
415 avcodec_close(codec_context_); | 449 avcodec_close(codec_context_); |
416 av_free(codec_context_); | 450 av_free(codec_context_); |
417 codec_context_ = NULL; | 451 codec_context_ = NULL; |
418 } | 452 } |
419 if (av_frame_) { | 453 if (av_frame_) { |
420 av_free(av_frame_); | 454 av_free(av_frame_); |
421 av_frame_ = NULL; | 455 av_frame_ = NULL; |
422 } | 456 } |
423 } | 457 } |
424 | 458 |
425 scoped_refptr<VideoFrame> FFmpegVideoDecoder::AllocateVideoFrame() { | |
426 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context_->pix_fmt); | |
427 size_t width = codec_context_->width; | |
428 size_t height = codec_context_->height; | |
429 | |
430 return VideoFrame::CreateFrame(format, width, height, | |
431 kNoTimestamp(), kNoTimestamp()); | |
432 } | |
433 | |
434 } // namespace media | 459 } // namespace media |
OLD | NEW |