Index: content/common/gpu/media/vt_video_decode_accelerator.cc
diff --git a/content/common/gpu/media/vt_video_decode_accelerator.cc b/content/common/gpu/media/vt_video_decode_accelerator.cc
index 63dc28c90c34bf0b8c3e9aa1beed11f414280f7b..b09a5e3e0c3ee36cc1ba0a1bf145be0e6fc42879 100644
--- a/content/common/gpu/media/vt_video_decode_accelerator.cc
+++ b/content/common/gpu/media/vt_video_decode_accelerator.cc
@@ -146,8 +146,12 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder(
                   status);
     return false;
   }
-  CMVideoDimensions coded_dimensions =
-      CMVideoFormatDescriptionGetDimensions(format_);
+
+  // If the session is compatible, there's nothing to do.
+  if (session_ &&
+      VTDecompressionSessionCanAcceptFormatDescription(session_, format_)) {
+    return true;
+  }
 
   // Prepare VideoToolbox configuration dictionaries.
   base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
@@ -170,6 +174,8 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder(
           &kCFTypeDictionaryKeyCallBacks,
           &kCFTypeDictionaryValueCallBacks));
 
+  CMVideoDimensions coded_dimensions =
+      CMVideoFormatDescriptionGetDimensions(format_);
 #define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i)
   // TODO(sandersd): RGBA option for 4:4:4 video.
   int32_t pixel_format = kCVPixelFormatType_422YpCbCr8;
@@ -184,7 +190,7 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder(
   CFDictionarySetValue(
       image_config, kCVPixelBufferOpenGLCompatibilityKey, kCFBooleanTrue);
 
-  // TODO(sandersd): Check if the session is already compatible.
+  // TODO(sandersd): Does the old session need to be flushed first?
   session_.reset();
   status = VTDecompressionSessionCreate(
       kCFAllocatorDefault,
@@ -198,18 +204,6 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder(
     return false;
   }
 
-  // If the size has changed, trigger a request for new picture buffers.
-  // TODO(sandersd): Move to SendPictures(), and use this just as a hint for an
-  // upcoming size change.
-  gfx::Size new_coded_size(coded_dimensions.width, coded_dimensions.height);
-  if (coded_size_ != new_coded_size) {
-    coded_size_ = new_coded_size;
-    gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
-        &VTVideoDecodeAccelerator::SizeChangedTask,
-        weak_this_factory_.GetWeakPtr(),
-        coded_size_));;
-  }
-
   return true;
 }
@@ -256,10 +250,9 @@ void VTVideoDecodeAccelerator::DecodeTask(
   //
   // 1. Locate relevant NALUs and compute the size of the translated data.
   //    Also record any parameter sets for VideoToolbox initialization.
+  bool config_changed = false;
   size_t data_size = 0;
   std::vector<media::H264NALU> nalus;
-  std::vector<const uint8_t*> config_nalu_data_ptrs;
-  std::vector<size_t> config_nalu_data_sizes;
   parser_.SetStream(buf, size);
   media::H264NALU nalu;
   while (true) {
@@ -271,24 +264,52 @@ void VTVideoDecodeAccelerator::DecodeTask(
       NotifyError(PLATFORM_FAILURE);
       return;
     }
-    // TODO(sandersd): Check that these are only at the start.
-    if (nalu.nal_unit_type == media::H264NALU::kSPS ||
-        nalu.nal_unit_type == media::H264NALU::kPPS ||
-        nalu.nal_unit_type == media::H264NALU::kSPSExt) {
-      DVLOG(2) << "Parameter set " << nalu.nal_unit_type;
-      config_nalu_data_ptrs.push_back(nalu.data);
-      config_nalu_data_sizes.push_back(nalu.size);
-    } else {
-      nalus.push_back(nalu);
-      data_size += kNALUHeaderLength + nalu.size;
+    // TODO(sandersd): Strict ordering rules.
+    switch (nalu.nal_unit_type) {
+      case media::H264NALU::kSPS:
+        last_sps_.assign(nalu.data, nalu.data + nalu.size);
+        last_spsext_.clear();
+        config_changed = true;
+        break;
+      case media::H264NALU::kSPSExt:
+        // TODO(sandersd): Check that the previous NALU was an SPS.
+        last_spsext_.assign(nalu.data, nalu.data + nalu.size);
+        config_changed = true;
+        break;
+      case media::H264NALU::kPPS:
+        last_pps_.assign(nalu.data, nalu.data + nalu.size);
+        config_changed = true;
+        break;
+      default:
+        nalus.push_back(nalu);
+        data_size += kNALUHeaderLength + nalu.size;
+        break;
     }
   }
 
   // 2. Initialize VideoToolbox.
-  // TODO(sandersd): Reinitialize when there are new parameter sets.
-  if (!session_) {
-    // If configuring fails, ConfigureDecoder() already called NotifyError().
-    if (!ConfigureDecoder(config_nalu_data_ptrs, config_nalu_data_sizes))
+  // TODO(sandersd): Check if the new configuration is identical before
+  // reconfiguring.
+  if (config_changed) {
+    if (last_sps_.size() == 0 || last_pps_.size() == 0) {
+      LOG(ERROR) << "Invalid configuration data";
+      NotifyError(INVALID_ARGUMENT);
+      return;
+    }
+    // TODO(sandersd): Check that the SPS and PPS IDs match.
+    std::vector<const uint8_t*> nalu_data_ptrs;
+    std::vector<size_t> nalu_data_sizes;
+    nalu_data_ptrs.push_back(&last_sps_.front());
+    nalu_data_sizes.push_back(last_sps_.size());
+    if (last_spsext_.size() != 0) {
+      nalu_data_ptrs.push_back(&last_spsext_.front());
+      nalu_data_sizes.push_back(last_spsext_.size());
+    }
+    nalu_data_ptrs.push_back(&last_pps_.front());
+    nalu_data_sizes.push_back(last_pps_.size());
+
+    // If ConfigureDecoder() fails, it already called NotifyError().
+    if (!ConfigureDecoder(nalu_data_ptrs, nalu_data_sizes))
       return;
   }
@@ -298,7 +319,15 @@ void VTVideoDecodeAccelerator::DecodeTask(
   if (!data_size)
     return;
 
+  // If the session is not configured, fail.
+  if (!session_) {
+    LOG(ERROR) << "Image slice without configuration data";
+    NotifyError(INVALID_ARGUMENT);
+    return;
+  }
+
   // 3. Allocate a memory-backed CMBlockBuffer for the translated data.
+  // TODO(sandersd): Check that the slice's PPS matches the current PPS.
   base::ScopedCFTypeRef<CMBlockBufferRef> data;
   OSStatus status = CMBlockBufferCreateWithMemoryBlock(
       kCFAllocatorDefault,
@@ -406,21 +435,14 @@ void VTVideoDecodeAccelerator::OutputTask(DecodedFrame frame) {
   ProcessDecodedFrames();
 }
 
-void VTVideoDecodeAccelerator::SizeChangedTask(gfx::Size coded_size) {
-  DCHECK(CalledOnValidThread());
-  texture_size_ = coded_size;
-  // TODO(sandersd): Dismiss existing picture buffers.
-  client_->ProvidePictureBuffers(
-      kNumPictureBuffers, texture_size_, GL_TEXTURE_RECTANGLE_ARB);
-}
-
 void VTVideoDecodeAccelerator::AssignPictureBuffers(
     const std::vector<media::PictureBuffer>& pictures) {
   DCHECK(CalledOnValidThread());
   for (size_t i = 0; i < pictures.size(); i++) {
     DCHECK(!texture_ids_.count(pictures[i].id()));
-    available_picture_ids_.push(pictures[i].id());
+    assigned_picture_ids_.insert(pictures[i].id());
+    available_picture_ids_.push_back(pictures[i].id());
     texture_ids_[pictures[i].id()] = pictures[i].texture_id();
   }
@@ -436,12 +458,16 @@ void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) {
   DCHECK(CalledOnValidThread());
   DCHECK_EQ(CFGetRetainCount(picture_bindings_[picture_id]), 1);
   picture_bindings_.erase(picture_id);
-  available_picture_ids_.push(picture_id);
-  ProcessDecodedFrames();
+  // Don't put the picture back in the available list if it has been dismissed.
+  if (assigned_picture_ids_.count(picture_id) != 0) {
+    available_picture_ids_.push_back(picture_id);
+    ProcessDecodedFrames();
+  }
 }
 
 void VTVideoDecodeAccelerator::CompleteAction(Action action) {
   DCHECK(CalledOnValidThread());
+
   switch (action) {
     case ACTION_FLUSH:
       client_->NotifyFlushDone();
@@ -526,6 +552,54 @@ void VTVideoDecodeAccelerator::ProcessDecodedFrames() {
   }
 }
 
+int32_t VTVideoDecodeAccelerator::ProcessDroppedFrames(
+    int32_t last_sent_bitstream_id,
+    int32_t up_to_bitstream_id) {
+  DCHECK(CalledOnValidThread());
+  // Drop frames as long as there is a frame, we have not reached the next
+  // action, and the next frame has no image.
+  while (!decoded_frames_.empty() &&
+         last_sent_bitstream_id != up_to_bitstream_id &&
+         decoded_frames_.front().image_buffer.get() == NULL) {
+    const DecodedFrame& frame = decoded_frames_.front();
+    DCHECK_EQ(pending_bitstream_ids_.front(), frame.bitstream_id);
+    client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id);
+    last_sent_bitstream_id = frame.bitstream_id;
+    decoded_frames_.pop();
+    pending_bitstream_ids_.pop();
+  }
+  return last_sent_bitstream_id;
+}
+
+// TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of
+// textures, this would be unnecessary, as the size is actually a property of
+// the texture binding, not the texture. We rebind every frame, so the size
+// passed to ProvidePictureBuffers() is meaningless.
+void VTVideoDecodeAccelerator::ProcessSizeChangeIfNeeded() {
+  DCHECK(CalledOnValidThread());
+  DCHECK(!decoded_frames_.empty());
+
+  // Find the size of the next image.
+  const DecodedFrame& frame = decoded_frames_.front();
+  CVImageBufferRef image_buffer = frame.image_buffer.get();
+  size_t width = CVPixelBufferGetWidth(image_buffer);
+  size_t height = CVPixelBufferGetHeight(image_buffer);
+  gfx::Size image_size(width, height);
+
+  if (picture_size_ != image_size) {
+    // Dismiss all assigned picture buffers.
+    for (int32_t picture_id : assigned_picture_ids_)
+      client_->DismissPictureBuffer(picture_id);
+    assigned_picture_ids_.clear();
+    available_picture_ids_.clear();
+
+    // Request new pictures.
+    client_->ProvidePictureBuffers(
+        kNumPictureBuffers, image_size, GL_TEXTURE_RECTANGLE_ARB);
+    picture_size_ = image_size;
+  }
+}
+
 int32_t VTVideoDecodeAccelerator::SendPictures(int32_t up_to_bitstream_id) {
   DCHECK(CalledOnValidThread());
   DCHECK(!decoded_frames_.empty());
@@ -533,6 +607,12 @@ int32_t VTVideoDecodeAccelerator::SendPictures(int32_t up_to_bitstream_id) {
   // TODO(sandersd): Store the actual last sent bitstream ID?
   int32_t last_sent_bitstream_id = -1;
+  last_sent_bitstream_id =
+      ProcessDroppedFrames(last_sent_bitstream_id, up_to_bitstream_id);
+  if (last_sent_bitstream_id == up_to_bitstream_id || decoded_frames_.empty())
+    return last_sent_bitstream_id;
+
+  ProcessSizeChangeIfNeeded();
   if (available_picture_ids_.empty())
     return last_sent_bitstream_id;
@@ -543,52 +623,52 @@ int32_t VTVideoDecodeAccelerator::SendPictures(int32_t up_to_bitstream_id) {
   }
   glEnable(GL_TEXTURE_RECTANGLE_ARB);
 
-  while (!available_picture_ids_.empty() &&
-         !decoded_frames_.empty() &&
-         last_sent_bitstream_id != up_to_bitstream_id &&
-         !has_error_) {
-    // We don't pop |frame| until it is consumed, which won't happen if an
-    // error occurs. Conveniently, this also removes some refcounting.
+  while (!available_picture_ids_.empty() && !has_error_) {
+    DCHECK_NE(last_sent_bitstream_id, up_to_bitstream_id);
+    DCHECK(!decoded_frames_.empty());
+
+    // We don't pop |frame| or |picture_id| until they are consumed, which may
+    // not happen if an error occurs. Conveniently, this also removes some
+    // refcounting.
     const DecodedFrame& frame = decoded_frames_.front();
     DCHECK_EQ(pending_bitstream_ids_.front(), frame.bitstream_id);
-
-    // Likewise, |picture_id| won't be popped if |image_buffer| is NULL or an
-    // error occurs.
-    // TODO(sandersd): Don't block waiting for a |picture_id| when
-    // |image_buffer| is NULL.
-    int32_t picture_id = available_picture_ids_.front();
+    int32_t picture_id = available_picture_ids_.back();
     CVImageBufferRef image_buffer = frame.image_buffer.get();
-    if (image_buffer) {
-      IOSurfaceRef surface = CVPixelBufferGetIOSurface(image_buffer);
-
-      gfx::ScopedTextureBinder
-          texture_binder(GL_TEXTURE_RECTANGLE_ARB, texture_ids_[picture_id]);
-      CGLError status = CGLTexImageIOSurface2D(
-          cgl_context_,                 // ctx
-          GL_TEXTURE_RECTANGLE_ARB,     // target
-          GL_RGB,                       // internal_format
-          texture_size_.width(),        // width
-          texture_size_.height(),       // height
-          GL_YCBCR_422_APPLE,           // format
-          GL_UNSIGNED_SHORT_8_8_APPLE,  // type
-          surface,                      // io_surface
-          0);                           // plane
-      if (status != kCGLNoError) {
-        NOTIFY_STATUS("CGLTexImageIOSurface2D()", status);
-        break;
-      }
-
-      picture_bindings_[picture_id] = frame.image_buffer;
-      client_->PictureReady(media::Picture(
-          picture_id, frame.bitstream_id, gfx::Rect(texture_size_)));
-      available_picture_ids_.pop();
+    IOSurfaceRef surface = CVPixelBufferGetIOSurface(image_buffer);
+
+    gfx::ScopedTextureBinder
+        texture_binder(GL_TEXTURE_RECTANGLE_ARB, texture_ids_[picture_id]);
+    CGLError status = CGLTexImageIOSurface2D(
+        cgl_context_,                 // ctx
+        GL_TEXTURE_RECTANGLE_ARB,     // target
+        GL_RGB,                       // internal_format
+        picture_size_.width(),        // width
+        picture_size_.height(),       // height
+        GL_YCBCR_422_APPLE,           // format
+        GL_UNSIGNED_SHORT_8_8_APPLE,  // type
+        surface,                      // io_surface
+        0);                           // plane
+    if (status != kCGLNoError) {
+      NOTIFY_STATUS("CGLTexImageIOSurface2D()", status);
+      break;
     }
 
+    picture_bindings_[picture_id] = frame.image_buffer;
+    client_->PictureReady(media::Picture(
+        picture_id, frame.bitstream_id, gfx::Rect(picture_size_)));
+    available_picture_ids_.pop_back();
     client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id);
     last_sent_bitstream_id = frame.bitstream_id;
     decoded_frames_.pop();
     pending_bitstream_ids_.pop();
+
+    last_sent_bitstream_id =
+        ProcessDroppedFrames(last_sent_bitstream_id, up_to_bitstream_id);
+    if (last_sent_bitstream_id == up_to_bitstream_id || decoded_frames_.empty())
+      break;
+
+    ProcessSizeChangeIfNeeded();
   }
   glDisable(GL_TEXTURE_RECTANGLE_ARB);