Index: content/common/gpu/media/vt_video_decode_accelerator.cc |
diff --git a/content/common/gpu/media/vt_video_decode_accelerator.cc b/content/common/gpu/media/vt_video_decode_accelerator.cc |
index b09a5e3e0c3ee36cc1ba0a1bf145be0e6fc42879..5b787125677cc9eb0369040ee6dc428ec73cab4a 100644 |
--- a/content/common/gpu/media/vt_video_decode_accelerator.cc |
+++ b/content/common/gpu/media/vt_video_decode_accelerator.cc |
@@ -7,7 +7,6 @@ |
#include <OpenGL/gl.h> |
#include "base/bind.h" |
-#include "base/callback_helpers.h" |
#include "base/command_line.h" |
#include "base/sys_byteorder.h" |
#include "base/thread_task_runner_handle.h" |
@@ -21,20 +20,21 @@ using content_common_gpu_media::InitializeStubs; |
using content_common_gpu_media::IsVtInitialized; |
using content_common_gpu_media::StubPathMap; |
-#define NOTIFY_STATUS(name, status) \ |
- do { \ |
- LOG(ERROR) << name << " failed with status " << status; \ |
- NotifyError(PLATFORM_FAILURE); \ |
+#define NOTIFY_STATUS(name, status) \ |
+ do { \ |
+ DLOG(ERROR) << name << " failed with status " << status; \ |
+ NotifyError(PLATFORM_FAILURE); \ |
} while (0) |
namespace content { |
-// Size of NALU length headers in AVCC/MPEG-4 format (can be 1, 2, or 4). |
+// Size to use for NALU length headers in AVC format (can be 1, 2, or 4). |
static const int kNALUHeaderLength = 4; |
-// We only request 5 picture buffers from the client which are used to hold the |
-// decoded samples. These buffers are then reused when the client tells us that |
-// it is done with the buffer. |
+// We request 5 picture buffers from the client, each of which has a texture ID |
+// that we can bind decoded frames to. We need enough to satisfy preroll, and |
+// enough to avoid unnecessary stalling, but no more than that. The resource |
+// requirements are low, as we don't need the textures to be backed by storage. |
static const int kNumPictureBuffers = 5; |
Pawel Osciak
2014/11/12 12:57:30
Would we want to use media::limits::kMaxVideoFrame
sandersd (OOO until July 31)
2014/11/13 00:05:45
media::limits::kMaxVideoFrames + 1 seems to be a p
Pawel Osciak
2014/11/15 03:28:02
Yes, at least +1 is what I meant, thanks. The reas
|
// Route decoded frame callbacks back into the VTVideoDecodeAccelerator. |
@@ -48,28 +48,20 @@ static void OutputThunk( |
CMTime presentation_duration) { |
VTVideoDecodeAccelerator* vda = |
reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon); |
- int32_t bitstream_id = reinterpret_cast<intptr_t>(source_frame_refcon); |
- vda->Output(bitstream_id, status, image_buffer); |
+ vda->Output(source_frame_refcon, status, image_buffer); |
} |
-VTVideoDecodeAccelerator::DecodedFrame::DecodedFrame( |
- int32_t bitstream_id, |
- CVImageBufferRef image_buffer) |
- : bitstream_id(bitstream_id), |
- image_buffer(image_buffer) { |
+VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) { |
} |
-VTVideoDecodeAccelerator::DecodedFrame::~DecodedFrame() { |
+VTVideoDecodeAccelerator::Task::~Task() { |
} |
-VTVideoDecodeAccelerator::PendingAction::PendingAction( |
- Action action, |
- int32_t bitstream_id) |
- : action(action), |
- bitstream_id(bitstream_id) { |
+VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id) |
+ : bitstream_id(bitstream_id) { |
} |
-VTVideoDecodeAccelerator::PendingAction::~PendingAction() { |
+VTVideoDecodeAccelerator::Frame::~Frame() { |
} |
VTVideoDecodeAccelerator::VTVideoDecodeAccelerator( |
@@ -78,7 +70,7 @@ VTVideoDecodeAccelerator::VTVideoDecodeAccelerator( |
: cgl_context_(cgl_context), |
make_context_current_(make_context_current), |
client_(NULL), |
- has_error_(false), |
+ state_(STATE_NORMAL), |
format_(NULL), |
session_(NULL), |
gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()), |
@@ -95,7 +87,7 @@ VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() { |
bool VTVideoDecodeAccelerator::Initialize( |
media::VideoCodecProfile profile, |
Client* client) { |
- DCHECK(CalledOnValidThread()); |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
client_ = client; |
// Only H.264 is supported. |
@@ -126,10 +118,36 @@ bool VTVideoDecodeAccelerator::Initialize( |
return true; |
} |
-bool VTVideoDecodeAccelerator::ConfigureDecoder( |
- const std::vector<const uint8_t*>& nalu_data_ptrs, |
- const std::vector<size_t>& nalu_data_sizes) { |
+bool VTVideoDecodeAccelerator::FinishDelayedFrames() { |
DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
+ if (session_) { |
+ OSStatus status = VTDecompressionSessionFinishDelayedFrames(session_); |
+ if (status) { |
+ NOTIFY_STATUS("VTDecompressionSessionFinishDelayedFrames()", status); |
+ return false; |
+ } |
+ } |
+ return true; |
+} |
+ |
+bool VTVideoDecodeAccelerator::ConfigureDecoder() { |
+ DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
+ DCHECK(!last_sps_.empty()); |
+ DCHECK(!last_pps_.empty()); |
+ |
+ // Build the configuration records. |
+ std::vector<const uint8_t*> nalu_data_ptrs; |
+ std::vector<size_t> nalu_data_sizes; |
+ nalu_data_ptrs.reserve(3); |
+ nalu_data_sizes.reserve(3); |
+ nalu_data_ptrs.push_back(&last_sps_.front()); |
+ nalu_data_sizes.push_back(last_sps_.size()); |
+ if (!last_spsext_.empty()) { |
+ nalu_data_ptrs.push_back(&last_spsext_.front()); |
+ nalu_data_sizes.push_back(last_spsext_.size()); |
+ } |
+ nalu_data_ptrs.push_back(&last_pps_.front()); |
+ nalu_data_sizes.push_back(last_pps_.size()); |
// Construct a new format description from the parameter sets. |
// TODO(sandersd): Replace this with custom code to support OS X < 10.9. |
@@ -147,10 +165,15 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder( |
return false; |
} |
- // If the session is compatible, there's nothing to do. |
+ // Store the new configuration data. |
+ CMVideoDimensions coded_dimensions = |
+ CMVideoFormatDescriptionGetDimensions(format_); |
+ coded_size_.SetSize(coded_dimensions.width, coded_dimensions.height); |
+ |
+ // If the session is compatible, there's nothing else to do. |
if (session_ && |
VTDecompressionSessionCanAcceptFormatDescription(session_, format_)) { |
- return true; |
+ return true; |
} |
// Prepare VideoToolbox configuration dictionaries. |
@@ -174,8 +197,6 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder( |
&kCFTypeDictionaryKeyCallBacks, |
&kCFTypeDictionaryValueCallBacks)); |
- CMVideoDimensions coded_dimensions = |
- CMVideoFormatDescriptionGetDimensions(format_); |
#define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i) |
// TODO(sandersd): RGBA option for 4:4:4 video. |
int32_t pixel_format = kCVPixelFormatType_422YpCbCr8; |
@@ -207,49 +228,27 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder( |
return true; |
} |
-void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) { |
- DCHECK(CalledOnValidThread()); |
- // Not actually a requirement of the VDA API, but we're lazy and use negative |
- // values as flags internally. Revisit that if this actually happens. |
- if (bitstream.id() < 0) { |
- LOG(ERROR) << "Negative bitstream ID"; |
- NotifyError(INVALID_ARGUMENT); |
- client_->NotifyEndOfBitstreamBuffer(bitstream.id()); |
- return; |
- } |
- pending_bitstream_ids_.push(bitstream.id()); |
- decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( |
- &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this), |
- bitstream)); |
-} |
- |
void VTVideoDecodeAccelerator::DecodeTask( |
- const media::BitstreamBuffer& bitstream) { |
+ const media::BitstreamBuffer& bitstream, |
+ Frame* frame) { |
Pawel Osciak
2014/11/12 12:57:30
I would strongly prefer if you used and passed sco
sandersd (OOO until July 31)
2014/11/13 00:05:45
I'm currently considering two options here:
1) C
Pawel Osciak
2014/11/15 03:28:02
How about 2), but when the frame is in decoding, p
|
DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
- // Once we have a bitstream buffer, we must either decode it or drop it. |
- // This construct ensures that the buffer is always dropped unless we call |
- // drop_bitstream.Release(). |
- base::ScopedClosureRunner drop_bitstream(base::Bind( |
- &VTVideoDecodeAccelerator::DropBitstream, base::Unretained(this), |
- bitstream.id())); |
- |
// Map the bitstream buffer. |
base::SharedMemory memory(bitstream.handle(), true); |
size_t size = bitstream.size(); |
if (!memory.Map(size)) { |
- LOG(ERROR) << "Failed to map bitstream buffer"; |
+ DLOG(ERROR) << "Failed to map bitstream buffer"; |
NotifyError(PLATFORM_FAILURE); |
return; |
} |
const uint8_t* buf = static_cast<uint8_t*>(memory.memory()); |
// NALUs are stored with Annex B format in the bitstream buffer (start codes), |
- // but VideoToolbox expects AVCC/MPEG-4 format (length headers), so we must |
- // rewrite the data. |
+ // but VideoToolbox expects AVC format (length headers), so we must rewrite |
+ // the data. |
// |
- // 1. Locate relevant NALUs and compute the size of the translated data. |
- // Also record any parameter sets for VideoToolbox initialization. |
+ // Locate relevant NALUs and compute the size of the rewritten data. Also |
+ // record any parameter sets for VideoToolbox initialization. |
bool config_changed = false; |
size_t data_size = 0; |
std::vector<media::H264NALU> nalus; |
@@ -260,11 +259,10 @@ void VTVideoDecodeAccelerator::DecodeTask( |
if (result == media::H264Parser::kEOStream) |
break; |
if (result != media::H264Parser::kOk) { |
- LOG(ERROR) << "Failed to find H.264 NALU"; |
+ DLOG(ERROR) << "Failed to find H.264 NALU"; |
NotifyError(PLATFORM_FAILURE); |
return; |
} |
- // TODO(sandersd): Strict ordering rules. |
switch (nalu.nal_unit_type) { |
case media::H264NALU::kSPS: |
last_sps_.assign(nalu.data, nalu.data + nalu.size); |
@@ -280,6 +278,15 @@ void VTVideoDecodeAccelerator::DecodeTask( |
last_pps_.assign(nalu.data, nalu.data + nalu.size); |
config_changed = true; |
break; |
+ case media::H264NALU::kSliceDataA: |
+ case media::H264NALU::kSliceDataB: |
+ case media::H264NALU::kSliceDataC: |
+      DLOG(ERROR) << "Coded slice data partitions not implemented."; |
+ NotifyError(PLATFORM_FAILURE); |
+ return; |
+ case media::H264NALU::kIDRSlice: |
+ case media::H264NALU::kNonIDRSlice: |
+ // TODO(sandersd): Compute pic_order_count. |
default: |
nalus.push_back(nalu); |
data_size += kNALUHeaderLength + nalu.size; |
@@ -287,47 +294,40 @@ void VTVideoDecodeAccelerator::DecodeTask( |
} |
} |
- // 2. Initialize VideoToolbox. |
- // TODO(sandersd): Check if the new configuration is identical before |
- // reconfiguring. |
+ // Initialize VideoToolbox. |
+ // TODO(sandersd): Instead of assuming that the last SPS and PPS units are |
+ // always the correct ones, maintain a cache of recent SPS and PPS units and |
+ // select from them using the slice header. |
if (config_changed) { |
if (last_sps_.size() == 0 || last_pps_.size() == 0) { |
- LOG(ERROR) << "Invalid configuration data"; |
+ DLOG(ERROR) << "Invalid configuration data"; |
NotifyError(INVALID_ARGUMENT); |
return; |
} |
- // TODO(sandersd): Check that the SPS and PPS IDs match. |
- std::vector<const uint8_t*> nalu_data_ptrs; |
- std::vector<size_t> nalu_data_sizes; |
- nalu_data_ptrs.push_back(&last_sps_.front()); |
- nalu_data_sizes.push_back(last_sps_.size()); |
- if (last_spsext_.size() != 0) { |
- nalu_data_ptrs.push_back(&last_spsext_.front()); |
- nalu_data_sizes.push_back(last_spsext_.size()); |
- } |
- nalu_data_ptrs.push_back(&last_pps_.front()); |
- nalu_data_sizes.push_back(last_pps_.size()); |
- |
- // If ConfigureDecoder() fails, it already called NotifyError(). |
- if (!ConfigureDecoder(nalu_data_ptrs, nalu_data_sizes)) |
+ if (!ConfigureDecoder()) |
return; |
} |
- // If there are no non-configuration units, immediately return an empty |
- // (ie. dropped) frame. It is an error to create a MemoryBlock with zero |
- // size. |
- if (!data_size) |
+ // If there are no non-configuration units, drop the bitstream buffer by |
+ // returning an empty frame. |
+ if (!data_size) { |
+ if (!FinishDelayedFrames()) |
+ return; |
+ gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
+ &VTVideoDecodeAccelerator::DecodeDone, |
+ weak_this_factory_.GetWeakPtr(), frame)); |
return; |
+ } |
- // If the session is not configured, fail. |
+ // If the session is not configured by this point, fail. |
if (!session_) { |
- LOG(ERROR) << "Image slice without configuration data"; |
+ DLOG(ERROR) << "Image slice without configuration"; |
NotifyError(INVALID_ARGUMENT); |
return; |
} |
- // 3. Allocate a memory-backed CMBlockBuffer for the translated data. |
- // TODO(sandersd): Check that the slice's PPS matches the current PPS. |
+ // Create a memory-backed CMBlockBuffer for the translated data. |
+ // TODO(sandersd): Pool of memory blocks. |
base::ScopedCFTypeRef<CMBlockBufferRef> data; |
OSStatus status = CMBlockBufferCreateWithMemoryBlock( |
kCFAllocatorDefault, |
@@ -344,7 +344,7 @@ void VTVideoDecodeAccelerator::DecodeTask( |
return; |
} |
- // 4. Copy NALU data, inserting length headers. |
+ // Copy NALU data into the CMBlockBuffer, inserting length headers. |
size_t offset = 0; |
for (size_t i = 0; i < nalus.size(); i++) { |
media::H264NALU& nalu = nalus[i]; |
@@ -364,8 +364,8 @@ void VTVideoDecodeAccelerator::DecodeTask( |
offset += nalu.size; |
} |
- // 5. Package the data for VideoToolbox and request decoding. |
- base::ScopedCFTypeRef<CMSampleBufferRef> frame; |
+ // Package the data in a CMSampleBuffer. |
+ base::ScopedCFTypeRef<CMSampleBufferRef> sample; |
status = CMSampleBufferCreate( |
kCFAllocatorDefault, |
data, // data_buffer |
@@ -378,369 +378,294 @@ void VTVideoDecodeAccelerator::DecodeTask( |
NULL, // &sample_timing_array |
0, // num_sample_size_entries |
NULL, // &sample_size_array |
- frame.InitializeInto()); |
+ sample.InitializeInto()); |
if (status) { |
NOTIFY_STATUS("CMSampleBufferCreate()", status); |
return; |
} |
+ // Update the frame data. |
+ frame->coded_size = coded_size_; |
+ |
+ // Send the frame for decoding. |
// Asynchronous Decompression allows for parallel submission of frames |
// (without it, DecodeFrame() does not return until the frame has been |
// decoded). We don't enable Temporal Processing so that frames are always |
// returned in decode order; this makes it easier to avoid deadlock. |
VTDecodeFrameFlags decode_flags = |
kVTDecodeFrame_EnableAsynchronousDecompression; |
- |
- intptr_t bitstream_id = bitstream.id(); |
status = VTDecompressionSessionDecodeFrame( |
session_, |
- frame, // sample_buffer |
+ sample, // sample_buffer |
decode_flags, // decode_flags |
- reinterpret_cast<void*>(bitstream_id), // source_frame_refcon |
+ reinterpret_cast<void*>(frame), // source_frame_refcon |
NULL); // &info_flags_out |
if (status) { |
NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status); |
return; |
} |
- |
- // Now that the bitstream is decoding, don't drop it. |
- (void)drop_bitstream.Release(); |
} |
// This method may be called on any VideoToolbox thread. |
void VTVideoDecodeAccelerator::Output( |
- int32_t bitstream_id, |
+ void* source_frame_refcon, |
OSStatus status, |
CVImageBufferRef image_buffer) { |
if (status) { |
- // TODO(sandersd): Handle dropped frames. |
NOTIFY_STATUS("Decoding", status); |
- image_buffer = NULL; |
} else if (CFGetTypeID(image_buffer) != CVPixelBufferGetTypeID()) { |
- LOG(ERROR) << "Decoded frame is not a CVPixelBuffer"; |
+ DLOG(ERROR) << "Decoded frame is not a CVPixelBuffer"; |
NotifyError(PLATFORM_FAILURE); |
- image_buffer = NULL; |
} else { |
- CFRetain(image_buffer); |
+ Frame* frame = reinterpret_cast<Frame*>(source_frame_refcon); |
+ frame->image.reset(image_buffer, base::scoped_policy::RETAIN); |
+ gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
+ &VTVideoDecodeAccelerator::DecodeDone, |
+ weak_this_factory_.GetWeakPtr(), frame)); |
} |
+} |
+ |
+void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) { |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
+ DCHECK_EQ(frame->bitstream_id, pending_frames_.front()->bitstream_id); |
+ Task task(TASK_FRAME); |
+ task.frame = pending_frames_.front(); |
+ pending_frames_.pop(); |
+ pending_tasks_.push(task); |
+ ProcessTasks(); |
+} |
+ |
+void VTVideoDecodeAccelerator::FlushTask(TaskType type) { |
+ DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
+ FinishDelayedFrames(); |
+ |
+ // Always queue a task, even if FinishDelayedFrames() fails, so that |
+ // destruction always completes. |
gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
- &VTVideoDecodeAccelerator::OutputTask, |
- weak_this_factory_.GetWeakPtr(), |
- DecodedFrame(bitstream_id, image_buffer))); |
+ &VTVideoDecodeAccelerator::FlushDone, |
+ weak_this_factory_.GetWeakPtr(), type)); |
} |
-void VTVideoDecodeAccelerator::OutputTask(DecodedFrame frame) { |
- DCHECK(CalledOnValidThread()); |
- decoded_frames_.push(frame); |
- ProcessDecodedFrames(); |
+void VTVideoDecodeAccelerator::FlushDone(TaskType type) { |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
+ pending_tasks_.push(Task(type)); |
+ ProcessTasks(); |
+} |
+ |
+void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) { |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
+ DCHECK_EQ(assigned_bitstream_ids_.count(bitstream.id()), 0u); |
+ assigned_bitstream_ids_.insert(bitstream.id()); |
+ Frame* frame = new Frame(bitstream.id()); |
+ pending_frames_.push(make_linked_ptr(frame)); |
+ decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( |
+ &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this), |
+ bitstream, frame)); |
} |
void VTVideoDecodeAccelerator::AssignPictureBuffers( |
const std::vector<media::PictureBuffer>& pictures) { |
- DCHECK(CalledOnValidThread()); |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
- for (size_t i = 0; i < pictures.size(); i++) { |
- DCHECK(!texture_ids_.count(pictures[i].id())); |
- assigned_picture_ids_.insert(pictures[i].id()); |
- available_picture_ids_.push_back(pictures[i].id()); |
- texture_ids_[pictures[i].id()] = pictures[i].texture_id(); |
+ for (const media::PictureBuffer& picture : pictures) { |
+ DCHECK(!texture_ids_.count(picture.id())); |
+ assigned_picture_ids_.insert(picture.id()); |
+ available_picture_ids_.push_back(picture.id()); |
+ texture_ids_[picture.id()] = picture.texture_id(); |
} |
// Pictures are not marked as uncleared until after this method returns, and |
// they will be broken if they are used before that happens. So, schedule |
// future work after that happens. |
gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
- &VTVideoDecodeAccelerator::ProcessDecodedFrames, |
+ &VTVideoDecodeAccelerator::ProcessTasks, |
weak_this_factory_.GetWeakPtr())); |
} |
void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) { |
- DCHECK(CalledOnValidThread()); |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
DCHECK_EQ(CFGetRetainCount(picture_bindings_[picture_id]), 1); |
picture_bindings_.erase(picture_id); |
- // Don't put the picture back in the available list if has been dismissed. |
if (assigned_picture_ids_.count(picture_id) != 0) { |
available_picture_ids_.push_back(picture_id); |
- ProcessDecodedFrames(); |
- } |
-} |
- |
-void VTVideoDecodeAccelerator::CompleteAction(Action action) { |
- DCHECK(CalledOnValidThread()); |
- |
- switch (action) { |
- case ACTION_FLUSH: |
- client_->NotifyFlushDone(); |
- break; |
- case ACTION_RESET: |
- client_->NotifyResetDone(); |
- break; |
- case ACTION_DESTROY: |
- delete this; |
- break; |
+ ProcessTasks(); |
+ } else { |
+ client_->DismissPictureBuffer(picture_id); |
} |
} |
-void VTVideoDecodeAccelerator::CompleteActions(int32_t bitstream_id) { |
- DCHECK(CalledOnValidThread()); |
- while (!pending_actions_.empty() && |
- pending_actions_.front().bitstream_id == bitstream_id) { |
- CompleteAction(pending_actions_.front().action); |
- pending_actions_.pop(); |
- } |
-} |
+void VTVideoDecodeAccelerator::ProcessTasks() { |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
-void VTVideoDecodeAccelerator::ProcessDecodedFrames() { |
- DCHECK(CalledOnValidThread()); |
+ while (!pending_tasks_.empty()) { |
+ const Task& task = pending_tasks_.front(); |
- while (!decoded_frames_.empty()) { |
- if (pending_actions_.empty()) { |
- // No pending actions; send frames normally. |
- if (!has_error_) |
- SendPictures(pending_bitstream_ids_.back()); |
- return; |
- } |
- |
- int32_t next_action_bitstream_id = pending_actions_.front().bitstream_id; |
- int32_t last_sent_bitstream_id = -1; |
- switch (pending_actions_.front().action) { |
- case ACTION_FLUSH: |
- // Send frames normally. |
- if (has_error_) |
+ switch (state_) { |
+ case STATE_NORMAL: |
+ if (!ProcessTask(task)) |
return; |
- last_sent_bitstream_id = SendPictures(next_action_bitstream_id); |
+ pending_tasks_.pop(); |
+ break; |
+ |
+ case STATE_ERROR: |
+ // Do nothing until Destroy() is called. |
break; |
Pawel Osciak
2014/11/12 12:57:30
Does this mean we loop here forever, since pending
sandersd (OOO until July 31)
2014/11/13 00:05:45
Done.
|
- case ACTION_RESET: |
- // Drop decoded frames. |
- if (has_error_) |
+ case STATE_DESTROYING: |
+ // Discard tasks until destruction is complete. |
+ if (task.type == TASK_DESTROY) { |
+ delete this; |
return; |
- while (!decoded_frames_.empty() && |
- last_sent_bitstream_id != next_action_bitstream_id) { |
- last_sent_bitstream_id = decoded_frames_.front().bitstream_id; |
- decoded_frames_.pop(); |
- DCHECK_EQ(pending_bitstream_ids_.front(), last_sent_bitstream_id); |
- pending_bitstream_ids_.pop(); |
- client_->NotifyEndOfBitstreamBuffer(last_sent_bitstream_id); |
} |
+ pending_tasks_.pop(); |
break; |
+ } |
+ } |
+} |
- case ACTION_DESTROY: |
- // Drop decoded frames, without bookkeeping. |
- while (!decoded_frames_.empty()) { |
- last_sent_bitstream_id = decoded_frames_.front().bitstream_id; |
- decoded_frames_.pop(); |
- } |
+bool VTVideoDecodeAccelerator::ProcessTask(const Task& task) { |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
+ DCHECK_EQ(state_, STATE_NORMAL); |
- // Handle completing the action specially, as it is important not to |
- // access |this| after calling CompleteAction(). |
- if (last_sent_bitstream_id == next_action_bitstream_id) |
- CompleteAction(ACTION_DESTROY); |
+ switch (task.type) { |
+ case TASK_FRAME: |
+ return ProcessFrame(*task.frame); |
- // Either |this| was deleted or no more progress can be made. |
- return; |
- } |
- |
- // If we ran out of buffers (or pictures), no more progress can be made |
- // until more frames are decoded. |
- if (last_sent_bitstream_id != next_action_bitstream_id) |
- return; |
+ case TASK_FLUSH: |
+ DCHECK_EQ(task.type, pending_flush_tasks_.front()); |
+ pending_flush_tasks_.pop(); |
+ client_->NotifyFlushDone(); |
+ return true; |
- // Complete all actions pending for this |bitstream_id|, then loop to see |
- // if progress can be made on the next action. |
- CompleteActions(next_action_bitstream_id); |
- } |
-} |
+ case TASK_RESET: |
+ DCHECK_EQ(task.type, pending_flush_tasks_.front()); |
+ pending_flush_tasks_.pop(); |
+ client_->NotifyResetDone(); |
+ return true; |
-int32_t VTVideoDecodeAccelerator::ProcessDroppedFrames( |
- int32_t last_sent_bitstream_id, |
- int32_t up_to_bitstream_id) { |
- DCHECK(CalledOnValidThread()); |
- // Drop frames as long as there is a frame, we have not reached the next |
- // action, and the next frame has no image. |
- while (!decoded_frames_.empty() && |
- last_sent_bitstream_id != up_to_bitstream_id && |
- decoded_frames_.front().image_buffer.get() == NULL) { |
- const DecodedFrame& frame = decoded_frames_.front(); |
- DCHECK_EQ(pending_bitstream_ids_.front(), frame.bitstream_id); |
- client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id); |
- last_sent_bitstream_id = frame.bitstream_id; |
- decoded_frames_.pop(); |
- pending_bitstream_ids_.pop(); |
+ case TASK_DESTROY: |
+ NOTREACHED() << "Can't destroy while in STATE_NORMAL."; |
+ NotifyError(ILLEGAL_STATE); |
+ return false; |
} |
- return last_sent_bitstream_id; |
} |
-// TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of |
-// textures, this would be unnecessary, as the size is actually a property of |
-// the texture binding, not the texture. We rebind every frame, so the size |
-// passed to ProvidePictureBuffers() is meaningless. |
-void VTVideoDecodeAccelerator::ProcessSizeChangeIfNeeded() { |
- DCHECK(CalledOnValidThread()); |
- DCHECK(!decoded_frames_.empty()); |
- |
- // Find the size of the next image. |
- const DecodedFrame& frame = decoded_frames_.front(); |
- CVImageBufferRef image_buffer = frame.image_buffer.get(); |
- size_t width = CVPixelBufferGetWidth(image_buffer); |
- size_t height = CVPixelBufferGetHeight(image_buffer); |
- gfx::Size image_size(width, height); |
- |
- if (picture_size_ != image_size) { |
- // Dismiss all assigned picture buffers. |
- for (int32_t picture_id : assigned_picture_ids_) |
- client_->DismissPictureBuffer(picture_id); |
- assigned_picture_ids_.clear(); |
- available_picture_ids_.clear(); |
- |
- // Request new pictures. |
- client_->ProvidePictureBuffers( |
- kNumPictureBuffers, image_size, GL_TEXTURE_RECTANGLE_ARB); |
- picture_size_ = image_size; |
+bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) { |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
+ DCHECK_EQ(state_, STATE_NORMAL); |
+ // If the next pending flush is for a reset, then the frame will be dropped. |
+ bool resetting = !pending_flush_tasks_.empty() && |
+ pending_flush_tasks_.front() == TASK_RESET; |
+ if (!resetting && frame.image.get()) { |
+ // If the |coded_size| has changed, request new picture buffers and then |
+ // wait for them. |
+ // TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of |
+ // textures, this would be unnecessary, as the size is actually a property |
+ // of the texture binding, not the texture. We rebind every frame, so the |
+ // size passed to ProvidePictureBuffers() is meaningless. |
+ if (picture_size_ != frame.coded_size) { |
+ // Dismiss current pictures. |
+ for (int32_t picture_id : assigned_picture_ids_) |
+ client_->DismissPictureBuffer(picture_id); |
+ assigned_picture_ids_.clear(); |
+ available_picture_ids_.clear(); |
+ |
+ // Request new pictures. |
+ picture_size_ = frame.coded_size; |
+ client_->ProvidePictureBuffers( |
+ kNumPictureBuffers, coded_size_, GL_TEXTURE_RECTANGLE_ARB); |
+ return false; |
+ } |
+ if (!SendFrame(frame)) |
+ return false; |
} |
+ assigned_bitstream_ids_.erase(frame.bitstream_id); |
+ client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id); |
+ return true; |
} |
-int32_t VTVideoDecodeAccelerator::SendPictures(int32_t up_to_bitstream_id) { |
- DCHECK(CalledOnValidThread()); |
- DCHECK(!decoded_frames_.empty()); |
+bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) { |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
+ DCHECK_EQ(state_, STATE_NORMAL); |
- // TODO(sandersd): Store the actual last sent bitstream ID? |
- int32_t last_sent_bitstream_id = -1; |
- |
- last_sent_bitstream_id = |
- ProcessDroppedFrames(last_sent_bitstream_id, up_to_bitstream_id); |
- if (last_sent_bitstream_id == up_to_bitstream_id || decoded_frames_.empty()) |
- return last_sent_bitstream_id; |
- |
- ProcessSizeChangeIfNeeded(); |
if (available_picture_ids_.empty()) |
- return last_sent_bitstream_id; |
+ return false; |
+ |
+ int32_t picture_id = available_picture_ids_.back(); |
+ IOSurfaceRef surface = CVPixelBufferGetIOSurface(frame.image.get()); |
if (!make_context_current_.Run()) { |
- LOG(ERROR) << "Failed to make GL context current"; |
+ DLOG(ERROR) << "Failed to make GL context current"; |
NotifyError(PLATFORM_FAILURE); |
- return last_sent_bitstream_id; |
+ return false; |
} |
glEnable(GL_TEXTURE_RECTANGLE_ARB); |
- while (!available_picture_ids_.empty() && !has_error_) { |
- DCHECK_NE(last_sent_bitstream_id, up_to_bitstream_id); |
- DCHECK(!decoded_frames_.empty()); |
- |
- // We don't pop |frame| or |picture_id| until they are consumed, which may |
- // not happen if an error occurs. Conveniently, this also removes some |
- // refcounting. |
- const DecodedFrame& frame = decoded_frames_.front(); |
- DCHECK_EQ(pending_bitstream_ids_.front(), frame.bitstream_id); |
- int32_t picture_id = available_picture_ids_.back(); |
- |
- CVImageBufferRef image_buffer = frame.image_buffer.get(); |
- IOSurfaceRef surface = CVPixelBufferGetIOSurface(image_buffer); |
- |
- gfx::ScopedTextureBinder |
- texture_binder(GL_TEXTURE_RECTANGLE_ARB, texture_ids_[picture_id]); |
- CGLError status = CGLTexImageIOSurface2D( |
- cgl_context_, // ctx |
- GL_TEXTURE_RECTANGLE_ARB, // target |
- GL_RGB, // internal_format |
- picture_size_.width(), // width |
- picture_size_.height(), // height |
- GL_YCBCR_422_APPLE, // format |
- GL_UNSIGNED_SHORT_8_8_APPLE, // type |
- surface, // io_surface |
- 0); // plane |
- if (status != kCGLNoError) { |
- NOTIFY_STATUS("CGLTexImageIOSurface2D()", status); |
- break; |
- } |
- |
- picture_bindings_[picture_id] = frame.image_buffer; |
- client_->PictureReady(media::Picture( |
- picture_id, frame.bitstream_id, gfx::Rect(picture_size_))); |
- available_picture_ids_.pop_back(); |
- client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id); |
- last_sent_bitstream_id = frame.bitstream_id; |
- decoded_frames_.pop(); |
- pending_bitstream_ids_.pop(); |
- |
- last_sent_bitstream_id = |
- ProcessDroppedFrames(last_sent_bitstream_id, up_to_bitstream_id); |
- if (last_sent_bitstream_id == up_to_bitstream_id || decoded_frames_.empty()) |
- break; |
- |
- ProcessSizeChangeIfNeeded(); |
+ gfx::ScopedTextureBinder |
+ texture_binder(GL_TEXTURE_RECTANGLE_ARB, texture_ids_[picture_id]); |
+ CGLError status = CGLTexImageIOSurface2D( |
+ cgl_context_, // ctx |
+ GL_TEXTURE_RECTANGLE_ARB, // target |
+ GL_RGB, // internal_format |
+ frame.coded_size.width(), // width |
+ frame.coded_size.height(), // height |
+ GL_YCBCR_422_APPLE, // format |
+ GL_UNSIGNED_SHORT_8_8_APPLE, // type |
+ surface, // io_surface |
+ 0); // plane |
+ if (status != kCGLNoError) { |
+ NOTIFY_STATUS("CGLTexImageIOSurface2D()", status); |
+ return false; |
} |
glDisable(GL_TEXTURE_RECTANGLE_ARB); |
- return last_sent_bitstream_id; |
-} |
- |
-void VTVideoDecodeAccelerator::FlushTask() { |
- DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
- OSStatus status = VTDecompressionSessionFinishDelayedFrames(session_); |
- if (status) |
- NOTIFY_STATUS("VTDecompressionSessionFinishDelayedFrames()", status); |
-} |
- |
-void VTVideoDecodeAccelerator::QueueAction(Action action) { |
- DCHECK(CalledOnValidThread()); |
- if (pending_bitstream_ids_.empty()) { |
- // If there are no pending frames, all actions complete immediately. |
- CompleteAction(action); |
- } else { |
- // Otherwise, queue the action. |
- pending_actions_.push(PendingAction(action, pending_bitstream_ids_.back())); |
- |
- // Request a flush to make sure the action will eventually complete. |
- decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( |
- &VTVideoDecodeAccelerator::FlushTask, base::Unretained(this))); |
- |
- // See if we can make progress now that there is a new pending action. |
- ProcessDecodedFrames(); |
- } |
+ available_picture_ids_.pop_back(); |
+ picture_bindings_[picture_id] = frame.image; |
+ client_->PictureReady(media::Picture( |
+ picture_id, frame.bitstream_id, gfx::Rect(frame.coded_size))); |
+ return true; |
} |
void VTVideoDecodeAccelerator::NotifyError(Error error) { |
- if (!CalledOnValidThread()) { |
+ if (!gpu_thread_checker_.CalledOnValidThread()) { |
gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
&VTVideoDecodeAccelerator::NotifyError, |
- weak_this_factory_.GetWeakPtr(), |
- error)); |
- return; |
+ weak_this_factory_.GetWeakPtr(), error)); |
+ } else if (state_ == STATE_NORMAL) { |
+ state_ = STATE_ERROR; |
+ client_->NotifyError(error); |
} |
- has_error_ = true; |
- client_->NotifyError(error); |
} |
-void VTVideoDecodeAccelerator::DropBitstream(int32_t bitstream_id) { |
- DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
- gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
- &VTVideoDecodeAccelerator::OutputTask, |
- weak_this_factory_.GetWeakPtr(), |
- DecodedFrame(bitstream_id, NULL))); |
+void VTVideoDecodeAccelerator::QueueFlush(TaskType type) { |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
+ pending_flush_tasks_.push(type); |
+ decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( |
+ &VTVideoDecodeAccelerator::FlushTask, base::Unretained(this), |
+ type)); |
+ |
+ // If this is a new flush request, see if we can make progress. |
+ if (pending_flush_tasks_.size() == 1) |
+ ProcessTasks(); |
} |
void VTVideoDecodeAccelerator::Flush() { |
- DCHECK(CalledOnValidThread()); |
- QueueAction(ACTION_FLUSH); |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
+ QueueFlush(TASK_FLUSH); |
} |
void VTVideoDecodeAccelerator::Reset() { |
- DCHECK(CalledOnValidThread()); |
- QueueAction(ACTION_RESET); |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
+ QueueFlush(TASK_RESET); |
} |
void VTVideoDecodeAccelerator::Destroy() { |
- DCHECK(CalledOnValidThread()); |
- // Drop any other pending actions. |
- while (!pending_actions_.empty()) |
- pending_actions_.pop(); |
- // Return all bitstream buffers. |
- while (!pending_bitstream_ids_.empty()) { |
- client_->NotifyEndOfBitstreamBuffer(pending_bitstream_ids_.front()); |
- pending_bitstream_ids_.pop(); |
- } |
- QueueAction(ACTION_DESTROY); |
+ DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
+ for (int32_t bitstream_id : assigned_bitstream_ids_) |
+ client_->NotifyEndOfBitstreamBuffer(bitstream_id); |
+ assigned_bitstream_ids_.clear(); |
+ state_ = STATE_DESTROYING; |
+ QueueFlush(TASK_DESTROY); |
} |
bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() { |