Index: content/renderer/media/rtc_video_decoder.cc |
diff --git a/content/renderer/media/rtc_video_decoder.cc b/content/renderer/media/rtc_video_decoder.cc |
index 302d5d5ef85add26ab23afea2c15ebc7a04bf37a..123a90ebfb208de73ae3650c43e53658dc9215b6 100644 |
--- a/content/renderer/media/rtc_video_decoder.cc |
+++ b/content/renderer/media/rtc_video_decoder.cc |
@@ -384,8 +384,12 @@ void RTCVideoDecoder::PictureReady(const media::Picture& picture) { |
return; |
} |
+ media::VideoPixelFormat pixel_format = vda_->GetOutputFormat(); |
+ if (pixel_format == media::PIXEL_FORMAT_UNKNOWN) |
+ pixel_format = media::PIXEL_FORMAT_ARGB; |
+ |
scoped_refptr<media::VideoFrame> frame = |
- CreateVideoFrame(picture, pb, timestamp, visible_rect); |
+ CreateVideoFrame(picture, pb, timestamp, visible_rect, pixel_format); |
if (!frame) { |
NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); |
return; |
@@ -416,19 +420,21 @@ scoped_refptr<media::VideoFrame> RTCVideoDecoder::CreateVideoFrame( |
const media::Picture& picture, |
const media::PictureBuffer& pb, |
uint32_t timestamp, |
- const gfx::Rect& visible_rect) { |
+ const gfx::Rect& visible_rect, |
+ media::VideoPixelFormat pixel_format) { |
DCHECK(decoder_texture_target_); |
// Convert timestamp from 90KHz to ms. |
base::TimeDelta timestamp_ms = base::TimeDelta::FromInternalValue( |
base::checked_cast<uint64_t>(timestamp) * 1000 / 90); |
- // TODO(mcasas): The incoming data is actually a YUV format, but is labelled |
- // as ARGB. This prevents the compositor from messing with it, since the |
- // underlying platform can handle the former format natively. Make sure the |
+ // TODO(mcasas): The incoming data may actually be in a YUV format even though |
+ // it is labelled as ARGB; whether the real format is reported depends on |
+ // whether the VDA provides an implementation of VDA::GetOutputFormat(). |
+ // This prevents the compositor from messing with it, since the underlying |
+ // platform can handle the former format natively. Make sure the |
// correct format is used and everyone down the line understands it. |
scoped_refptr<media::VideoFrame> frame = media::VideoFrame::WrapNativeTexture( |
- media::PIXEL_FORMAT_ARGB, |
- gpu::MailboxHolder(pb.texture_mailbox(0), gpu::SyncToken(), |
- decoder_texture_target_), |
+ pixel_format, gpu::MailboxHolder(pb.texture_mailbox(0), gpu::SyncToken(), |
+ decoder_texture_target_), |
media::BindToCurrentLoop(base::Bind( |
&RTCVideoDecoder::ReleaseMailbox, weak_factory_.GetWeakPtr(), |
factories_, picture.picture_buffer_id(), pb.texture_ids()[0])), |