Index: content/common/gpu/media/gpu_jpeg_decode_accelerator.cc |
diff --git a/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc b/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..ba3976f5198ea03f5f6187f8533d70fb1ce5a97d |
--- /dev/null |
+++ b/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc |
@@ -0,0 +1,273 @@ |
+// Copyright 2015 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h" |
+ |
+#include <stdint.h> |
+ |
+#include "base/bind.h" |
+#include "base/logging.h" |
+#include "base/memory/shared_memory.h" |
+#include "base/single_thread_task_runner.h" |
+#include "base/trace_event/trace_event.h" |
+#include "content/common/gpu/gpu_channel.h" |
+#include "content/common/gpu/gpu_messages.h" |
+#include "ipc/ipc_message_macros.h" |
+#include "ipc/message_filter.h" |
+#include "media/filters/jpeg_parser.h" |
+#include "ui/gfx/geometry/size.h" |
+ |
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) |
+#include "content/common/gpu/media/vaapi_jpeg_decode_accelerator.h" |
+#endif |
+ |
+namespace { |
+ |
+// Bound as the VideoFrame destruction callback in OnDecodeOnIOThread; holding |
+// |shm| in the closure keeps the shared-memory mapping alive for the whole |
+// lifetime of the frame, so the body intentionally does nothing. |
+void DecodeFinished(scoped_ptr<base::SharedMemory> shm) { |
+  // Do nothing. Because VideoFrame is backed by |shm|, the purpose of this |
+  // function is to just keep a reference to |shm| to make sure it lives |
+  // until decode finishes. |
+} |
+ |
+}  // namespace |
+ |
+namespace content { |
+ |
+// Per-route adapter between a media::JpegDecodeAccelerator and the IPC layer. |
+// Owns the accelerator instance and forwards its completion/error callbacks |
+// to the owner as DecodeAck notifications. |
+// NOTE(review): GpuJpegDecodeAccelerator::NotifyDecodeStatus DCHECKs the |
+// child thread, so these callbacks must arrive on that thread — confirm |
+// against each accelerator implementation. |
+class GpuJpegDecodeAccelerator::Client |
+    : public media::JpegDecodeAccelerator::Client { |
+ public: |
+  Client(content::GpuJpegDecodeAccelerator* owner, int32 route_id) |
+      : owner_(owner), route_id_(route_id) {} |
+ |
+  // media::JpegDecodeAccelerator::Client implementation. |
+  void VideoFrameReady(int32_t bitstream_buffer_id) override { |
+    owner_->NotifyDecodeStatus(route_id_, bitstream_buffer_id, |
+                               media::JpegDecodeAccelerator::NO_ERROR); |
+  } |
+ |
+  void NotifyError(int32_t bitstream_buffer_id, |
+                   media::JpegDecodeAccelerator::Error error) override { |
+    owner_->NotifyDecodeStatus(route_id_, bitstream_buffer_id, error); |
+  } |
+ |
+  // Transfers ownership of the platform decoder to this client. |
+  void set_accelerator(scoped_ptr<media::JpegDecodeAccelerator> accelerator) { |
+    accelerator_ = accelerator.Pass(); |
+  } |
+ |
+ private: |
+  content::GpuJpegDecodeAccelerator* owner_;  // Not owned. |
+  int32 route_id_;  // IPC route this client serves. |
+  scoped_ptr<media::JpegDecodeAccelerator> accelerator_;  // Owned decoder. |
+}; |
+ |
+class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter { |
+ public: |
+  // |owner| is not owned. NOTE(review): it is used via base::Unretained on |
+  // the IO thread; see the review thread on OnDestroyOnIOThread below about |
+  // destruction ordering. Constructor made explicit per review nit, and |
+  // |sender_| is initialized so OnChannelError()/SendOnIOThread() never read |
+  // an uninitialized pointer before OnFilterAdded() runs. |
+  explicit MessageFilter(GpuJpegDecodeAccelerator* owner) |
+      : owner_(owner), sender_(NULL) {} |
piman
2015/05/26 23:31:53
nit: explicit.
kcwu
2015/05/27 14:13:23
Done.
|
+ |
+  // IPC::MessageFilter lifecycle (presumably invoked on the IO thread): drop |
+  // |sender_| when the channel errors or closes so SendOnIOThread() stops |
+  // delivering, and record the sender when the filter is attached. |
+  void OnChannelError() override { sender_ = NULL; } |
+ |
+  void OnChannelClosing() override { sender_ = NULL; } |
+ |
+  void OnFilterAdded(IPC::Sender* sender) override { sender_ = sender; } |
+ |
+  // Dispatches decoder messages on the IO thread, but only for routes that |
+  // have an accelerator registered; everything else returns false so it |
+  // falls through to the channel's regular dispatch. |
+  bool OnMessageReceived(const IPC::Message& msg) override { |
+    const int32 route_id = msg.routing_id(); |
+    if (!accelerator_map_.Lookup(route_id)) |
+      return false; |
+ |
+    bool handled = true; |
+    IPC_BEGIN_MESSAGE_MAP_WITH_PARAM(MessageFilter, msg, &route_id) |
+    IPC_MESSAGE_HANDLER(AcceleratedJpegDecoderMsg_Decode, OnDecodeOnIOThread) |
+    IPC_MESSAGE_HANDLER(AcceleratedJpegDecoderMsg_Destroy, |
+                        OnDestroyOnIOThread) |
+    IPC_MESSAGE_UNHANDLED(handled = false) |
+    IPC_END_MESSAGE_MAP() |
+    return handled; |
+  } |
+ |
+  // Sends |message| through |sender_|, taking ownership. If the channel has |
+  // already errored/closed (|sender_| cleared), deletes the message and |
+  // returns false. Only asynchronous messages are allowed on this path. |
+  bool SendOnIOThread(IPC::Message* message) { |
+    DCHECK(!message->is_sync()); |
+    if (!sender_) { |
+      delete message; |
+      return false; |
+    } |
+    return sender_->Send(message); |
+  } |
+ |
+  // Registers |accelerator| (not owned; see |accelerator_map_|) for |
+  // |route_id| and sends the successful GpuMsg_CreateJpegDecoder reply. |
+  // Posted from GpuJpegDecodeAccelerator::AddClient on the child thread. |
+  void AddClientOnIOThread(int32 route_id, |
+                           media::JpegDecodeAccelerator* accelerator, |
+                           IPC::Message* reply_msg) { |
+    DCHECK(owner_->io_task_runner_->BelongsToCurrentThread()); |
+    DCHECK(!accelerator_map_.Lookup(route_id)); |
+ |
+    accelerator_map_.AddWithID(accelerator, route_id); |
+    GpuMsg_CreateJpegDecoder::WriteReplyParams(reply_msg, true); |
+    SendOnIOThread(reply_msg); |
+  } |
+ |
+  // Handles AcceleratedJpegDecoderMsg_Destroy on the IO thread: removes the |
+  // route's accelerator entry here so no further decodes dispatch, then asks |
+  // the child thread to delete the owning Client. |
+  void OnDestroyOnIOThread(const int32* route_id) { |
+    DCHECK(owner_->io_task_runner_->BelongsToCurrentThread()); |
+    accelerator_map_.Remove(*route_id); |
+ |
+    // NOTE(review): base::Unretained assumes |owner_| is still alive when |
+    // this task runs — see the destruction-ordering discussion below. |
+    owner_->child_task_runner_->PostTask( |
+        FROM_HERE, base::Bind(&GpuJpegDecodeAccelerator::RemoveClient, |
+                              base::Unretained(owner_), *route_id)); |
piman
2015/05/26 23:31:53
The GpuJpegDecodeAccelerator could be destroyed be
kcwu
2015/05/27 14:13:23
Done.Thanks for your detail suggestion.
However a
piman
2015/05/27 21:08:40
I would prefer if you didn't wait there. I don't t
wuchengli
2015/05/28 03:41:39
GpuJpegDecodeAccelerator owns the filter. When GJD
kcwu
2015/05/28 12:10:26
Done.
I made ~GpuJpegDecodeAccelerator waiting for
|
+  } |
+ |
+  // Acks a decode request from the IO thread with |error| as the status. |
+  void NotifyDecodeStatusOnIOThread(int32 route_id, |
+                                    int32_t buffer_id, |
+                                    media::JpegDecodeAccelerator::Error error) { |
+    DCHECK(owner_->io_task_runner_->BelongsToCurrentThread()); |
+    SendOnIOThread(new AcceleratedJpegDecoderHostMsg_DecodeAck( |
+        route_id, buffer_id, error)); |
+  } |
+ |
+  // Handles AcceleratedJpegDecoderMsg_Decode on the IO thread: validates the |
+  // request, wraps the output shared memory as an I420 VideoFrame and hands |
+  // both buffers to the route's accelerator. Any failure is acked |
+  // immediately with a DecodeAck carrying the error. |
+  void OnDecodeOnIOThread( |
+      const int32* route_id, |
+      const AcceleratedJpegDecoderMsg_Decode_Params& params) { |
+    DCHECK(owner_->io_task_runner_->BelongsToCurrentThread()); |
+    DCHECK(route_id); |
+    TRACE_EVENT0("jpeg", "GpuJpegDecodeAccelerator::MessageFilter::OnDecode"); |
+ |
+    // Negative buffer ids are treated as a caller error and rejected before |
+    // touching any shared memory. |
+    if (params.input_buffer_id < 0) { |
+      LOG(ERROR) << "BitstreamBuffer id " << params.input_buffer_id |
+                 << " out of range"; |
+      NotifyDecodeStatusOnIOThread( |
+          *route_id, params.input_buffer_id, |
+          media::JpegDecodeAccelerator::INVALID_ARGUMENT); |
+      return; |
+    } |
+ |
+    media::BitstreamBuffer input_buffer(params.input_buffer_id, |
+                                        params.input_buffer_handle, |
+                                        params.input_buffer_size); |
+ |
+    // Map the caller-provided output buffer; the decoded frame is written |
+    // into this shared memory. |
+    scoped_ptr<base::SharedMemory> output_shm( |
+        new base::SharedMemory(params.output_video_frame_handle, false)); |
+    if (!output_shm->Map(params.output_buffer_size)) { |
+      LOG(ERROR) << "Could not map output shared memory for input buffer id " |
+                 << params.input_buffer_id; |
+      NotifyDecodeStatusOnIOThread( |
+          *route_id, params.input_buffer_id, |
+          media::JpegDecodeAccelerator::PLATFORM_FAILURE); |
+      return; |
+    } |
+ |
+    // |output_shm| ownership moves into the frame's destruction callback |
+    // (DecodeFinished) so the mapping stays alive until decode completes. |
+    uint8* shm_memory = reinterpret_cast<uint8*>(output_shm->memory()); |
+    scoped_refptr<media::VideoFrame> frame = |
+        media::VideoFrame::WrapExternalPackedMemory( |
+            media::VideoFrame::I420, |
+            params.coded_size, |
+            gfx::Rect(params.coded_size), |
+            params.coded_size, |
+            shm_memory, |
+            params.output_buffer_size, |
+            params.output_video_frame_handle, |
+            0, |
+            base::TimeDelta(), |
+            base::Bind(DecodeFinished, base::Passed(&output_shm))); |
+ |
+    if (!frame.get()) { |
+      LOG(ERROR) << "Could not create VideoFrame for input buffer id " |
+                 << params.input_buffer_id; |
+      NotifyDecodeStatusOnIOThread( |
+          *route_id, params.input_buffer_id, |
+          media::JpegDecodeAccelerator::PLATFORM_FAILURE); |
+      return; |
+    } |
+ |
+    // Lookup cannot fail here: OnMessageReceived only dispatches routes |
+    // present in |accelerator_map_|, on this same thread. |
+    media::JpegDecodeAccelerator* accelerator = |
+        accelerator_map_.Lookup(*route_id); |
+    DCHECK(accelerator); |
+    accelerator->Decode(input_buffer, frame); |
+  } |
+ |
+ protected: |
+  // IPC::MessageFilter is reference-counted, so the destructor is kept |
+  // non-public; the filter is deleted on its final Release(). |
+  virtual ~MessageFilter() {} |
+ |
+ private: |
+  // Back-pointer to the object that created this filter; not owned. See the |
+  // review thread above about destruction ordering. |
+  GpuJpegDecodeAccelerator* owner_; |
+ |
+  // The sender to which this filter was added. |
+  IPC::Sender* sender_; |
+ |
+  // A map from route id to JpegDecodeAccelerator. |
+  // This doesn't take ownership. The ownership is still maintained by |
+  // GpuJpegDecodeAccelerator on child thread. |
+  IDMap<media::JpegDecodeAccelerator, IDMapExternalPointer> accelerator_map_; |
+}; |
+ |
+// Constructed on the "child" thread: ThreadTaskRunnerHandle::Get() binds |
+// |child_task_runner_| to whichever thread runs this constructor. |
+// |channel| is not owned. |
+GpuJpegDecodeAccelerator::GpuJpegDecodeAccelerator( |
+    GpuChannel* channel, |
+    const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner) |
+    : channel_(channel), |
+      filter_(new MessageFilter(this)), |
+      child_task_runner_(base::ThreadTaskRunnerHandle::Get()), |
+      io_task_runner_(io_task_runner) { |
+} |
+ |
+// Must run on the owning thread, after every client has been removed via |
+// RemoveClient (enforced by the IsEmpty DCHECK). |
+GpuJpegDecodeAccelerator::~GpuJpegDecodeAccelerator() { |
+  DCHECK(CalledOnValidThread()); |
+  DCHECK(client_map_.IsEmpty()); |
+} |
+ |
+// Always returns false: decoder messages are intercepted by |filter_| on the |
+// IO thread, so nothing is handled by the child-thread dispatcher here. |
+bool GpuJpegDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) { |
+  // Messages are actually handled in filter on IO thread. |
+  return false; |
+} |
+ |
+// Creates the platform JPEG decode accelerator for |route_id| and answers |
+// the GpuMsg_CreateJpegDecoder request. On success the route is registered |
+// with |filter_| on the IO thread, which also sends the (true) reply; on |
+// failure the (false) reply is sent directly from here. Runs on the thread |
+// this object was created on (see CalledOnValidThread). |
+void GpuJpegDecodeAccelerator::AddClient(int32 route_id, |
+                                         IPC::Message* reply_msg) { |
+  DCHECK(CalledOnValidThread()); |
+  scoped_ptr<media::JpegDecodeAccelerator> accelerator; |
+ |
+// When adding more platforms, GpuJpegDecoder::Supported needs to be |
+// updated as well. |
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) |
+  accelerator.reset(new VaapiJpegDecodeAccelerator(io_task_runner_)); |
+#else |
+  DVLOG(1) << "HW JPEG decode acceleration not available."; |
+#endif |
+ |
+  scoped_ptr<Client> client(new Client(this, route_id)); |
+  if (!accelerator.get() || !accelerator->Initialize(client.get())) { |
+    DLOG(ERROR) << "JPEG accelerator Initialize failed"; |
+    GpuMsg_CreateJpegDecoder::WriteReplyParams(reply_msg, false); |
+    Send(reply_msg); |
+    return; |
+  } |
+ |
+  // Install the filter lazily when the first client appears. |
+  if (client_map_.IsEmpty()) |
+    channel_->AddFilter(filter_.get()); |
+ |
+  // Register the raw accelerator pointer on the IO thread; ownership of the |
+  // accelerator moves to |client| below. NOTE(review): this relies on the |
+  // Client staying alive until Destroy is routed through the filter — see |
+  // the OnDestroyOnIOThread review thread. |
+  io_task_runner_->PostTask( |
+      FROM_HERE, base::Bind(&MessageFilter::AddClientOnIOThread, filter_, |
+                        route_id, accelerator.get(), reply_msg)); |
+ |
+  client->set_accelerator(accelerator.Pass()); |
+  client_map_.AddWithID(client.release(), route_id); |
+} |
+ |
+// Sends a DecodeAck for |buffer_id| on the owning thread; invoked by the |
+// per-route Client callbacks (VideoFrameReady / NotifyError). |
+void GpuJpegDecodeAccelerator::NotifyDecodeStatus( |
+    int32 route_id, |
+    int32_t buffer_id, |
+    media::JpegDecodeAccelerator::Error error) { |
+  DCHECK(CalledOnValidThread()); |
+  Send(new AcceleratedJpegDecoderHostMsg_DecodeAck(route_id, buffer_id, error)); |
+} |
+ |
+// Drops the client for |route_id| (NOTE(review): Remove() presumably deletes |
+// it, since AddClient release()d ownership into |client_map_| — confirm the |
+// IDMap ownership mode in the header) and uninstalls the filter once no |
+// clients remain. Posted from the filter's OnDestroyOnIOThread. |
+void GpuJpegDecodeAccelerator::RemoveClient(int32 route_id) { |
+  DCHECK(CalledOnValidThread()); |
+ |
+  client_map_.Remove(route_id); |
+  if (client_map_.IsEmpty()) |
+    channel_->RemoveFilter(filter_.get()); |
+} |
+ |
+// Forwards |message| (ownership passes to the channel) on the owning thread. |
+bool GpuJpegDecodeAccelerator::Send(IPC::Message* message) { |
+  DCHECK(CalledOnValidThread()); |
+  return channel_->Send(message); |
+} |
+ |
+} // namespace content |