Chromium Code Reviews
| Index: content/common/gpu/media/gpu_video_decode_accelerator.cc |
| diff --git a/content/common/gpu/media/gpu_video_decode_accelerator.cc b/content/common/gpu/media/gpu_video_decode_accelerator.cc |
| index 302caea392dc8823a23c359aeed7a0c4cca9769c..5d94e04c7bf92da18d3ebe4c45f8c63b64fa5efa 100644 |
| --- a/content/common/gpu/media/gpu_video_decode_accelerator.cc |
| +++ b/content/common/gpu/media/gpu_video_decode_accelerator.cc |
| @@ -151,12 +151,46 @@ GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() { |
| DCHECK(!video_decode_accelerator_); |
| } |
| +// static |
| +gpu::VideoDecodeAcceleratorSupportedProfiles |
|
xhwang
2015/11/13 01:16:55
here and below, non-cdm methods are moved to fix the ordering of methods to match the header file.
dcheng
2015/11/13 19:54:53
In the future, maybe just do this in a separate patch, so the diff is easier to review.
xhwang
2015/11/13 20:06:22
Acknowledged.
|
| +GpuVideoDecodeAccelerator::GetSupportedProfiles() { |
| + media::VideoDecodeAccelerator::SupportedProfiles profiles; |
| + const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess(); |
| + if (cmd_line->HasSwitch(switches::kDisableAcceleratedVideoDecode)) |
| + return gpu::VideoDecodeAcceleratorSupportedProfiles(); |
| + |
| + // Query supported profiles for each VDA. The order of querying VDAs should |
| + // be the same as the order of initializing VDAs. Then the returned profile |
| + // can be initialized by corresponding VDA successfully. |
| +#if defined(OS_WIN) |
| + profiles = DXVAVideoDecodeAccelerator::GetSupportedProfiles(); |
| +#elif defined(OS_CHROMEOS) |
| + media::VideoDecodeAccelerator::SupportedProfiles vda_profiles; |
| +#if defined(USE_V4L2_CODEC) |
| + vda_profiles = V4L2VideoDecodeAccelerator::GetSupportedProfiles(); |
| + GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles, &profiles); |
| + vda_profiles = V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles(); |
| + GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles, &profiles); |
| +#endif |
| +#if defined(ARCH_CPU_X86_FAMILY) |
| + vda_profiles = VaapiVideoDecodeAccelerator::GetSupportedProfiles(); |
| + GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles, &profiles); |
| +#endif |
| +#elif defined(OS_MACOSX) |
| + profiles = VTVideoDecodeAccelerator::GetSupportedProfiles(); |
| +#elif defined(OS_ANDROID) |
| + profiles = AndroidVideoDecodeAccelerator::GetSupportedProfiles(); |
| +#endif |
| + return GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeProfiles(profiles); |
| +} |
| + |
| bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) { |
| if (!video_decode_accelerator_) |
| return false; |
| bool handled = true; |
| IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg) |
| + IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_SetCdm, OnSetCdm) |
| IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode) |
| IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers, |
| OnAssignPictureBuffers) |
| @@ -170,6 +204,12 @@ bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) { |
| return handled; |
| } |
| +void GpuVideoDecodeAccelerator::NotifyCdmAttached(bool success) { |
| + if (!Send(new AcceleratedVideoDecoderHostMsg_CdmAttached(host_route_id_, |
| + success))) |
| + DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_CdmAttached) failed"; |
| +} |
| + |
| void GpuVideoDecodeAccelerator::ProvidePictureBuffers( |
| uint32 requested_num_of_buffers, |
| const gfx::Size& dimensions, |
| @@ -225,6 +265,26 @@ void GpuVideoDecodeAccelerator::PictureReady( |
| } |
| } |
| +void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer( |
| + int32 bitstream_buffer_id) { |
| + if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed( |
| + host_route_id_, bitstream_buffer_id))) { |
| + DLOG(ERROR) |
| + << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) " |
| + << "failed"; |
| + } |
| +} |
| + |
| +void GpuVideoDecodeAccelerator::NotifyFlushDone() { |
| + if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_))) |
| + DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed"; |
| +} |
| + |
| +void GpuVideoDecodeAccelerator::NotifyResetDone() { |
| + if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_))) |
| + DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed"; |
| +} |
| + |
| void GpuVideoDecodeAccelerator::NotifyError( |
| media::VideoDecodeAccelerator::Error error) { |
| if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification( |
| @@ -234,6 +294,34 @@ void GpuVideoDecodeAccelerator::NotifyError( |
| } |
| } |
| +void GpuVideoDecodeAccelerator::OnWillDestroyStub() { |
| + // The stub is going away, so we have to stop and destroy VDA here, before |
| + // returning, because the VDA may need the GL context to run and/or do its |
| + // cleanup. We cannot destroy the VDA before the IO thread message filter is |
| + // removed however, since we cannot service incoming messages with VDA gone. |
| + // We cannot simply check for existence of VDA on IO thread though, because |
| + // we don't want to synchronize the IO thread with the ChildThread. |
| + // So we have to wait for the RemoveFilter callback here instead and remove |
| + // the VDA after it arrives and before returning. |
| + if (filter_.get()) { |
|
dcheng
2015/11/13 19:54:53
No .get() (unless you end up moving this code in a separate patch anyway).
xhwang
2015/11/13 20:06:22
Done.
|
| + stub_->channel()->RemoveFilter(filter_.get()); |
| + filter_removed_.Wait(); |
| + } |
| + |
| + stub_->channel()->RemoveRoute(host_route_id_); |
| + stub_->RemoveDestructionObserver(this); |
| + |
| + video_decode_accelerator_.reset(); |
| + delete this; |
| +} |
| + |
| +bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) { |
| + if (filter_.get() && io_task_runner_->BelongsToCurrentThread()) |
|
dcheng
2015/11/13 19:54:53
Ditto
xhwang
2015/11/13 20:06:22
Done.
|
| + return filter_->SendOnIOThread(message); |
| + DCHECK(child_task_runner_->BelongsToCurrentThread()); |
| + return stub_->channel()->Send(message); |
| +} |
| + |
| void GpuVideoDecodeAccelerator::Initialize( |
| const media::VideoCodecProfile profile, |
| IPC::Message* init_done_msg) { |
| @@ -397,37 +485,9 @@ GpuVideoDecodeAccelerator::CreateAndroidVDA() { |
| return decoder.Pass(); |
| } |
| -// static |
| -gpu::VideoDecodeAcceleratorSupportedProfiles |
| -GpuVideoDecodeAccelerator::GetSupportedProfiles() { |
| - media::VideoDecodeAccelerator::SupportedProfiles profiles; |
| - const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess(); |
| - if (cmd_line->HasSwitch(switches::kDisableAcceleratedVideoDecode)) |
| - return gpu::VideoDecodeAcceleratorSupportedProfiles(); |
| - |
| - // Query supported profiles for each VDA. The order of querying VDAs should |
| - // be the same as the order of initializing VDAs. Then the returned profile |
| - // can be initialized by corresponding VDA successfully. |
| -#if defined(OS_WIN) |
| - profiles = DXVAVideoDecodeAccelerator::GetSupportedProfiles(); |
| -#elif defined(OS_CHROMEOS) |
| - media::VideoDecodeAccelerator::SupportedProfiles vda_profiles; |
| -#if defined(USE_V4L2_CODEC) |
| - vda_profiles = V4L2VideoDecodeAccelerator::GetSupportedProfiles(); |
| - GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles, &profiles); |
| - vda_profiles = V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles(); |
| - GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles, &profiles); |
| -#endif |
| -#if defined(ARCH_CPU_X86_FAMILY) |
| - vda_profiles = VaapiVideoDecodeAccelerator::GetSupportedProfiles(); |
| - GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles, &profiles); |
| -#endif |
| -#elif defined(OS_MACOSX) |
| - profiles = VTVideoDecodeAccelerator::GetSupportedProfiles(); |
| -#elif defined(OS_ANDROID) |
| - profiles = AndroidVideoDecodeAccelerator::GetSupportedProfiles(); |
| -#endif |
| - return GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeProfiles(profiles); |
| +void GpuVideoDecodeAccelerator::OnSetCdm(int cdm_id) { |
| + DCHECK(video_decode_accelerator_.get()); |
|
dcheng
2015/11/13 19:54:53
No .get()
xhwang
2015/11/13 20:06:22
Done.
|
| + video_decode_accelerator_->SetCdm(cdm_id); |
| } |
| // Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is |
| @@ -560,47 +620,6 @@ void GpuVideoDecodeAccelerator::OnFilterRemoved() { |
| filter_removed_.Signal(); |
| } |
| -void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer( |
| - int32 bitstream_buffer_id) { |
| - if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed( |
| - host_route_id_, bitstream_buffer_id))) { |
| - DLOG(ERROR) |
| - << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) " |
| - << "failed"; |
| - } |
| -} |
| - |
| -void GpuVideoDecodeAccelerator::NotifyFlushDone() { |
| - if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_))) |
| - DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed"; |
| -} |
| - |
| -void GpuVideoDecodeAccelerator::NotifyResetDone() { |
| - if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_))) |
| - DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed"; |
| -} |
| - |
| -void GpuVideoDecodeAccelerator::OnWillDestroyStub() { |
| - // The stub is going away, so we have to stop and destroy VDA here, before |
| - // returning, because the VDA may need the GL context to run and/or do its |
| - // cleanup. We cannot destroy the VDA before the IO thread message filter is |
| - // removed however, since we cannot service incoming messages with VDA gone. |
| - // We cannot simply check for existence of VDA on IO thread though, because |
| - // we don't want to synchronize the IO thread with the ChildThread. |
| - // So we have to wait for the RemoveFilter callback here instead and remove |
| - // the VDA after it arrives and before returning. |
| - if (filter_.get()) { |
| - stub_->channel()->RemoveFilter(filter_.get()); |
| - filter_removed_.Wait(); |
| - } |
| - |
| - stub_->channel()->RemoveRoute(host_route_id_); |
| - stub_->RemoveDestructionObserver(this); |
| - |
| - video_decode_accelerator_.reset(); |
| - delete this; |
| -} |
| - |
| void GpuVideoDecodeAccelerator::SetTextureCleared( |
| const media::Picture& picture) { |
| DCHECK(child_task_runner_->BelongsToCurrentThread()); |
| @@ -619,13 +638,6 @@ void GpuVideoDecodeAccelerator::SetTextureCleared( |
| uncleared_textures_.erase(it); |
| } |
| -bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) { |
| - if (filter_.get() && io_task_runner_->BelongsToCurrentThread()) |
| - return filter_->SendOnIOThread(message); |
| - DCHECK(child_task_runner_->BelongsToCurrentThread()); |
| - return stub_->channel()->Send(message); |
| -} |
| - |
| void GpuVideoDecodeAccelerator::SendCreateDecoderReply(IPC::Message* message, |
| bool succeeded) { |
| GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(message, succeeded); |