Chromium Code Reviews

Unified Diff: content/common/gpu/media/gpu_jpeg_decode_accelerator.cc

Issue 1124423008: MJPEG acceleration for video capture using VAAPI, the GPU and IPC part (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@mjpeg-1-media
Patch Set: Created 5 years, 7 months ago
Index: content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
diff --git a/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc b/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
new file mode 100644
index 0000000000000000000000000000000000000000..3fcfc04adf62cf81c191a3658a9de38950a4644c
--- /dev/null
+++ b/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
@@ -0,0 +1,313 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
+
+#include <stdint.h>
+
+#include <map>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory.h"
+#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
+#include "base/trace_event/trace_event.h"
+#include "content/common/gpu/gpu_channel.h"
+#include "content/common/gpu/gpu_messages.h"
+#include "ipc/ipc_message_macros.h"
+#include "ipc/message_filter.h"
+#include "media/filters/jpeg_parser.h"
+#include "ui/gfx/geometry/size.h"
+
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+#include "content/common/gpu/media/vaapi_jpeg_decode_accelerator.h"
+#endif
+
+namespace {
+
+void DecodeFinished(scoped_ptr<base::SharedMemory> shm) {
+ // Do nothing. Because the VideoFrame is backed by |shm|, the purpose of this
+ // function is just to keep a reference to |shm| to make sure it lives until
+ // the decode finishes.
+}
+
+} // namespace
+
+namespace content {
+
+class GpuJpegDecodeAccelerator::Client
+ : public media::JpegDecodeAccelerator::Client {
+ public:
+ Client(content::GpuJpegDecodeAccelerator* owner, int32 route_id)
+ : owner_(owner->AsWeakPtr()), route_id_(route_id) {}
+
+ // media::JpegDecodeAccelerator::Client implementation.
+ void VideoFrameReady(int32_t bitstream_buffer_id) override {
+ if (owner_)
+ owner_->NotifyDecodeStatus(route_id_, bitstream_buffer_id,
+ media::JpegDecodeAccelerator::NO_ERROR);
+ }
+
+ void NotifyError(int32_t bitstream_buffer_id,
+ media::JpegDecodeAccelerator::Error error) override {
+ if (owner_)
+ owner_->NotifyDecodeStatus(route_id_, bitstream_buffer_id, error);
+ }
+
+ void Decode(const media::BitstreamBuffer& bitstream_buffer,
+ const scoped_refptr<media::VideoFrame>& video_frame) {
+ DCHECK(accelerator_);
+ accelerator_->Decode(bitstream_buffer, video_frame);
+ }
+
+ void set_accelerator(scoped_ptr<media::JpegDecodeAccelerator> accelerator) {
+ accelerator_ = accelerator.Pass();
+ }
+
+ private:
+ base::WeakPtr<content::GpuJpegDecodeAccelerator> owner_;
+ int32 route_id_;
+ scoped_ptr<media::JpegDecodeAccelerator> accelerator_;
+};
+
+// Create, destroy, and RemoveClient run on the child thread. All other
+// methods run on the IO thread.
+class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
+ public:
+ explicit MessageFilter(GpuJpegDecodeAccelerator* owner)
+ : owner_(owner->AsWeakPtr()),
+ child_task_runner_(owner_->child_task_runner_),
+ io_task_runner_(owner_->io_task_runner_) {}
+
+ void OnChannelError() override { sender_ = NULL; }
Pawel Osciak 2015/05/28 09:13:17 s/NULL/nullptr/ here and in other places.
kcwu 2015/05/28 12:10:27 Done.
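For reference, the requested change is presumably just the literal swap, here and in OnChannelClosing() below:

  void OnChannelError() override { sender_ = nullptr; }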
+
+ void OnChannelClosing() override { sender_ = NULL; }
+
+ void OnFilterAdded(IPC::Sender* sender) override { sender_ = sender; }
+
+ bool OnMessageReceived(const IPC::Message& msg) override {
+ const int32 route_id = msg.routing_id();
+ if (client_map_.count(route_id) == 0)
+ return false;
+
+ bool handled = true;
+ IPC_BEGIN_MESSAGE_MAP_WITH_PARAM(MessageFilter, msg, &route_id)
+ IPC_MESSAGE_HANDLER(AcceleratedJpegDecoderMsg_Decode, OnDecodeOnIOThread)
+ IPC_MESSAGE_HANDLER(AcceleratedJpegDecoderMsg_Destroy,
+ OnDestroyOnIOThread)
+ IPC_MESSAGE_UNHANDLED(handled = false)
+ IPC_END_MESSAGE_MAP()
+ return handled;
+ }
+
+ bool SendOnIOThread(IPC::Message* message) {
+ DCHECK(!message->is_sync());
+ if (!sender_) {
+ delete message;
+ return false;
+ }
+ return sender_->Send(message);
+ }
+
+ void AddClientOnIOThread(int32 route_id,
+ Client* client,
+ IPC::Message* reply_msg) {
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
+ DCHECK(client_map_.count(route_id) == 0);
+
+ client_map_[route_id] = client;
+ GpuMsg_CreateJpegDecoder::WriteReplyParams(reply_msg, true);
+ SendOnIOThread(reply_msg);
+ }
+
+ void OnDestroyOnIOThread(const int32* route_id) {
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
+ const auto& it = client_map_.find(*route_id);
+ DCHECK(it != client_map_.end());
Pawel Osciak 2015/05/28 09:13:17 DCHECK_NE
kcwu 2015/05/28 12:10:27 DCHECK_NE is not for iterators. The arguments need to support operator<< so they can be logged on failure.
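(A minimal sketch of the distinction, assuming this era of base/logging.h has no fallback for unstreamable types: the DCHECK_NE/EQ/GT macros stream both operands into the failure message, so both argument types need an operator<<, which std::map iterators do not provide; the plain boolean form only needs operator!=.)

  const auto& it = client_map_.find(*route_id);
  // DCHECK_NE(it, client_map_.end());  // would not compile: no operator<< for the iterator
  DCHECK(it != client_map_.end());      // boolean form, no streaming of the operands needed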
+ Client* client = it->second;
+ DCHECK(client);
+ client_map_.erase(it);
+
+ child_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&MessageFilter::DestroyClient, this, client));
+ }
+
+ void DestroyClient(Client* client) {
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
+ delete client;
+ if (owner_)
+ owner_->ClientRemoved();
+ }
+
+ void NotifyDecodeStatusOnIOThread(int32 route_id,
+ int32_t buffer_id,
+ media::JpegDecodeAccelerator::Error error) {
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
+ SendOnIOThread(new AcceleratedJpegDecoderHostMsg_DecodeAck(
+ route_id, buffer_id, error));
+ }
+
+ void OnDecodeOnIOThread(
+ const int32* route_id,
+ const AcceleratedJpegDecoderMsg_Decode_Params& params) {
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
+ DCHECK(route_id);
+ TRACE_EVENT0("jpeg", "GpuJpegDecodeAccelerator::MessageFilter::OnDecode");
+
+ if (params.input_buffer_id < 0) {
+ LOG(ERROR) << "BitstreamBuffer id " << params.input_buffer_id
+ << " out of range";
+ NotifyDecodeStatusOnIOThread(
+ *route_id, params.input_buffer_id,
+ media::JpegDecodeAccelerator::INVALID_ARGUMENT);
+ return;
+ }
+
+ media::BitstreamBuffer input_buffer(params.input_buffer_id,
+ params.input_buffer_handle,
+ params.input_buffer_size);
+
+ scoped_ptr<base::SharedMemory> output_shm(
+ new base::SharedMemory(params.output_video_frame_handle, false));
+ if (!output_shm->Map(params.output_buffer_size)) {
+ LOG(ERROR) << "Could not map output shared memory for input buffer id "
+ << params.input_buffer_id;
+ NotifyDecodeStatusOnIOThread(
+ *route_id, params.input_buffer_id,
+ media::JpegDecodeAccelerator::PLATFORM_FAILURE);
+ return;
+ }
+
+ uint8* shm_memory = reinterpret_cast<uint8*>(output_shm->memory());
+ scoped_refptr<media::VideoFrame> frame =
+ media::VideoFrame::WrapExternalPackedMemory(
+ media::VideoFrame::I420,
+ params.coded_size,
+ gfx::Rect(params.coded_size),
+ params.coded_size,
+ shm_memory,
+ params.output_buffer_size,
+ params.output_video_frame_handle,
+ 0,
+ base::TimeDelta(),
+ base::Bind(DecodeFinished, base::Passed(&output_shm)));
+
+ if (!frame.get()) {
+ LOG(ERROR) << "Could not create VideoFrame for input buffer id "
+ << params.input_buffer_id;
+ NotifyDecodeStatusOnIOThread(
+ *route_id, params.input_buffer_id,
+ media::JpegDecodeAccelerator::PLATFORM_FAILURE);
+ return;
+ }
+
+ DCHECK(client_map_.count(*route_id) > 0);
Pawel Osciak 2015/05/28 09:13:17 DCHECK_GT
kcwu 2015/05/28 12:10:27 Done.
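For reference, the fixed form is presumably along these lines (std::map::count() returns a size_t, so an unsigned literal keeps the comparison sign-correct):

  DCHECK_GT(client_map_.count(*route_id), 0u);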
+ Client* client = client_map_[*route_id];
+ client->Decode(input_buffer, frame);
+ }
+
+ protected:
+ ~MessageFilter() override {
+ // Make sure |client_map_| is deleted on child thread.
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
kcwu 2015/05/27 14:13:23 The ownership of |Client| is maintained by the filter.
piman 2015/05/27 21:08:40 Correct, it could be deleted on either the child thread or the IO thread.
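(A hypothetical sketch, for illustration only, of a destructor that tolerates either thread; this is not what the patch set does. Normal route teardown still deletes each Client on the child thread via DestroyClient(), and only leftover entries would be cleaned up here.)

  ~MessageFilter() override {
    // The last reference to the filter may be dropped on either the child
    // thread or the IO thread, so no thread DCHECK; just free any clients
    // that were never removed through OnDestroyOnIOThread().
    STLDeleteValues(&client_map_);
  }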
+ STLDeleteValues(&client_map_);
+ }
+
+ private:
+ base::WeakPtr<GpuJpegDecodeAccelerator> owner_;
+
+ // GPU child task runner.
+ scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
+
+ // GPU IO task runner.
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
+
+ // The sender to which this filter was added.
+ IPC::Sender* sender_;
+
+ // A map from route id to JpegDecodeAccelerator.
+ // Unless in destructor (on child thread), |client_map_| should only be
+ // accessed on IO thread.
+ std::map<int32, Client*> client_map_;
Pawel Osciak 2015/05/28 09:13:17 Could we use a scoper?
kcwu 2015/05/28 12:10:27 Scoper implies it will automatically delete Client
+};
+
+GpuJpegDecodeAccelerator::GpuJpegDecodeAccelerator(
+ GpuChannel* channel,
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ : channel_(channel),
+ child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
+ io_task_runner_(io_task_runner),
+ client_number_(0) {
+}
+
+GpuJpegDecodeAccelerator::~GpuJpegDecodeAccelerator() {
+ DCHECK(CalledOnValidThread());
+ if (client_number_ > 0)
+ channel_->RemoveFilter(filter_.get());
+}
+
+bool GpuJpegDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
+ // Messages are actually handled in filter on IO thread.
+ return false;
+}
+
+void GpuJpegDecodeAccelerator::AddClient(int32 route_id,
+ IPC::Message* reply_msg) {
+ DCHECK(CalledOnValidThread());
+ scoped_ptr<media::JpegDecodeAccelerator> accelerator;
+
+// When adding more platforms, GpuJpegDecoder::Supported needs to be updated
+// as well.
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+ accelerator.reset(new VaapiJpegDecodeAccelerator(io_task_runner_));
+#else
+ DVLOG(1) << "HW JPEG decode acceleration not available.";
+#endif
+
+ scoped_ptr<Client> client(new Client(this, route_id));
+ if (!accelerator.get() || !accelerator->Initialize(client.get())) {
+ DLOG(ERROR) << "JPEG accelerator Initialize failed";
+ GpuMsg_CreateJpegDecoder::WriteReplyParams(reply_msg, false);
+ Send(reply_msg);
+ return;
+ }
+ client->set_accelerator(accelerator.Pass());
+
+ if (client_number_ == 0) {
+ filter_ = new MessageFilter(this);
+ // This should be before AddClientOnIOThread.
+ channel_->AddFilter(filter_.get());
+ }
+ client_number_++;
+
+ io_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&MessageFilter::AddClientOnIOThread, filter_,
+ route_id, client.release(), reply_msg));
+}
+
+void GpuJpegDecodeAccelerator::NotifyDecodeStatus(
+ int32 route_id,
+ int32_t buffer_id,
+ media::JpegDecodeAccelerator::Error error) {
+ DCHECK(CalledOnValidThread());
+ Send(new AcceleratedJpegDecoderHostMsg_DecodeAck(route_id, buffer_id, error));
+}
+
+void GpuJpegDecodeAccelerator::ClientRemoved() {
+ DCHECK(CalledOnValidThread());
+ DCHECK_GT(client_number_, 0);
+ client_number_--;
+ if (client_number_ == 0) {
+ channel_->RemoveFilter(filter_.get());
+ filter_ = nullptr;
+ }
+}
+
+bool GpuJpegDecodeAccelerator::Send(IPC::Message* message) {
+ DCHECK(CalledOnValidThread());
+ return channel_->Send(message);
+}
+
+} // namespace content
