OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h" | |
6 | |
7 #include <stdint.h> | |
8 | |
9 #include "base/bind.h" | |
10 #include "base/logging.h" | |
11 #include "base/memory/shared_memory.h" | |
12 #include "base/single_thread_task_runner.h" | |
13 #include "base/trace_event/trace_event.h" | |
14 #include "content/common/gpu/gpu_channel.h" | |
15 #include "content/common/gpu/gpu_messages.h" | |
16 #include "ipc/ipc_message_macros.h" | |
17 #include "ipc/message_filter.h" | |
18 #include "media/filters/jpeg_parser.h" | |
19 #include "ui/gfx/geometry/size.h" | |
20 | |
21 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) | |
22 #include "content/common/gpu/media/vaapi_jpeg_decode_accelerator.h" | |
23 #endif | |
24 | |
25 namespace base { | |
26 | |
27 void DefaultDeleter<content::GpuJpegDecodeAccelerator>::operator()( | |
28 void* jpeg_decode_accelerator) const { | |
29 static_cast<content::GpuJpegDecodeAccelerator*>(jpeg_decode_accelerator) | |
30 ->Destroy(); | |
31 } | |
32 | |
33 } // namespace base | |
34 | |
35 namespace content { | |
36 | |
37 class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter { | |
piman
2015/05/18 22:46:19
I would like to avoid having a separate filter per
kcwu
2015/05/22 19:46:51
I'm trying to implement your suggestion but not ye
piman
2015/05/22 22:27:48
Agreed on that.
kcwu
2015/05/25 18:57:16
Done.
| |
38 public: | |
39 MessageFilter(GpuJpegDecodeAccelerator* owner, int32 host_route_id) | |
40 : owner_(owner), host_route_id_(host_route_id) {} | |
41 | |
42 void OnChannelError() override { sender_ = NULL; } | |
43 | |
44 void OnChannelClosing() override { sender_ = NULL; } | |
45 | |
46 void OnFilterAdded(IPC::Sender* sender) override { sender_ = sender; } | |
47 | |
48 void OnFilterRemoved() override { | |
49 owner_->OnFilterRemoved(); | |
50 } | |
51 | |
52 bool OnMessageReceived(const IPC::Message& msg) override { | |
53 if (msg.routing_id() != host_route_id_) | |
54 return false; | |
55 | |
56 IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg) | |
57 IPC_MESSAGE_FORWARD(AcceleratedJpegDecoderMsg_Decode, owner_, | |
58 GpuJpegDecodeAccelerator::OnDecode) | |
59 IPC_MESSAGE_UNHANDLED(return false;) | |
60 IPC_END_MESSAGE_MAP() | |
61 return true; | |
62 } | |
63 | |
64 bool SendOnIOThread(IPC::Message* message) { | |
65 DCHECK(!message->is_sync()); | |
66 if (!sender_) { | |
67 delete message; | |
68 return false; | |
69 } | |
70 return sender_->Send(message); | |
71 } | |
72 | |
73 protected: | |
74 virtual ~MessageFilter() {} | |
75 | |
76 private: | |
77 GpuJpegDecodeAccelerator* owner_; | |
78 int32 host_route_id_; | |
79 // The sender to which this filter was added. | |
80 IPC::Sender* sender_; | |
81 }; | |
82 | |
83 GpuJpegDecodeAccelerator::GpuJpegDecodeAccelerator( | |
84 GpuChannel* channel, | |
85 int32 host_route_id, | |
86 const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner) | |
87 : channel_(channel), | |
88 host_route_id_(host_route_id), | |
89 filter_removed_(true, false), | |
90 io_task_runner_(io_task_runner) { | |
91 child_task_runner_ = base::ThreadTaskRunnerHandle::Get(); | |
92 } | |
93 | |
94 GpuJpegDecodeAccelerator::~GpuJpegDecodeAccelerator() { | |
95 } | |
96 | |
97 bool GpuJpegDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) { | |
98 bool handled = true; | |
99 IPC_BEGIN_MESSAGE_MAP(GpuJpegDecodeAccelerator, msg) | |
100 IPC_MESSAGE_HANDLER(AcceleratedJpegDecoderMsg_Destroy, OnDestroy) | |
101 IPC_MESSAGE_UNHANDLED(handled = false) | |
102 IPC_END_MESSAGE_MAP() | |
103 return handled; | |
104 } | |
105 | |
106 bool GpuJpegDecodeAccelerator::Initialize() { | |
107 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
108 DCHECK(!jpeg_decode_accelerator_.get()); | |
109 | |
110 // When adding more platforms, GpuJpegDecodeAcceleratorAdapter::Supported need | |
111 // update as well. | |
piman
2015/05/18 22:46:19
nit: indent comments
kcwu
2015/05/25 18:57:16
This was indented by 'git cl format'. Would you li
| |
112 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) | |
113 jpeg_decode_accelerator_.reset( | |
114 new VaapiJpegDecodeAccelerator(io_task_runner_)); | |
115 #else | |
116 DVLOG(1) << "HW JPEG decode acceleration not available."; | |
117 return false; | |
118 #endif | |
119 | |
120 if (!channel_->AddRoute(host_route_id_, this)) { | |
piman
2015/05/18 22:46:19
Actually, if the routing is done by the filter, yo
kcwu
2015/05/25 18:57:16
Done.
| |
121 LOG(ERROR) << "GpuJpegDecodeAccelerator::Initialize(): " | |
122 "failed to add route"; | |
123 return false; | |
124 } | |
125 | |
126 filter_ = new MessageFilter(this, host_route_id_); | |
127 channel_->AddFilter(filter_.get()); | |
128 | |
129 return jpeg_decode_accelerator_->Initialize(this); | |
130 } | |
131 | |
132 void GpuJpegDecodeAccelerator::NotifyError( | |
133 int32_t buffer_id, | |
134 media::JpegDecodeAccelerator::Error error) { | |
135 Send(new AcceleratedJpegDecoderHostMsg_NotifyError(host_route_id_, buffer_id, | |
136 error)); | |
137 } | |
138 | |
139 void GpuJpegDecodeAccelerator::VideoFrameReady(int32_t bitstream_buffer_id) { | |
140 // This is called from JDA's decode thread. | |
141 Send(new AcceleratedJpegDecoderHostMsg_VideoFrameReady(host_route_id_, | |
142 bitstream_buffer_id)); | |
143 } | |
144 | |
145 void DecodeFinished(scoped_ptr<base::SharedMemory> shm) { | |
146 // Do nothing. Because VideoFrame is backed by |shm|, the purpose of this | |
147 // function is to just keep reference of |shm| to make sure it lives util | |
148 // decode finishes. | |
149 } | |
150 | |
151 void GpuJpegDecodeAccelerator::OnDecode( | |
piman
2015/05/18 22:46:19
This is called from the IO thread. Can you name it
kcwu
2015/05/25 18:57:16
Done.
| |
152 const AcceleratedJpegDecoderMsg_Decode_Params& params) { | |
153 DCHECK(io_task_runner_->BelongsToCurrentThread()); | |
154 DCHECK(jpeg_decode_accelerator_.get()); | |
155 TRACE_EVENT0("jpeg", "GpuJpegDecodeAccelerator::OnDecode"); | |
156 | |
157 if (params.input_buffer_id < 0) { | |
158 LOG(ERROR) << "BitstreamBuffer id " << params.input_buffer_id | |
159 << " out of range"; | |
160 NotifyError(params.input_buffer_id, | |
161 media::JpegDecodeAccelerator::INVALID_ARGUMENT); | |
162 return; | |
163 } | |
164 | |
165 media::BitstreamBuffer input_buffer(params.input_buffer_id, | |
166 params.input_buffer_handle, | |
167 params.input_buffer_size); | |
168 | |
169 scoped_ptr<base::SharedMemory> output_shm( | |
170 new base::SharedMemory(params.output_video_frame_handle, false)); | |
171 if (!output_shm->Map(params.output_buffer_size)) { | |
172 LOG(ERROR) << "Could not map output shared memory for input buffer id " | |
173 << params.input_buffer_id; | |
174 NotifyError(params.input_buffer_id, | |
175 media::JpegDecodeAccelerator::PLATFORM_FAILURE); | |
176 return; | |
177 } | |
178 | |
179 uint8* shm_memory = reinterpret_cast<uint8*>(output_shm->memory()); | |
180 scoped_refptr<media::VideoFrame> frame = | |
181 media::VideoFrame::WrapExternalPackedMemory( | |
182 media::VideoFrame::I420, | |
183 params.coded_size, | |
184 gfx::Rect(params.coded_size), | |
185 params.coded_size, | |
186 shm_memory, | |
187 params.output_buffer_size, | |
188 params.output_video_frame_handle, | |
189 0, | |
190 base::TimeDelta(), | |
191 base::Bind(DecodeFinished, base::Passed(&output_shm))); | |
192 | |
193 if (!frame.get()) { | |
194 LOG(ERROR) << "Could not create VideoFrame for input buffer id " | |
195 << params.input_buffer_id; | |
196 NotifyError(params.input_buffer_id, | |
197 media::JpegDecodeAccelerator::PLATFORM_FAILURE); | |
198 return; | |
199 } | |
200 | |
201 jpeg_decode_accelerator_->Decode(input_buffer, frame); | |
202 } | |
203 | |
204 void GpuJpegDecodeAccelerator::OnDestroy() { | |
205 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
206 DCHECK(jpeg_decode_accelerator_.get()); | |
207 Destroy(); | |
208 } | |
209 | |
210 void GpuJpegDecodeAccelerator::OnFilterRemoved() { | |
211 // We're destroying; cancel all callbacks. | |
212 filter_removed_.Signal(); | |
213 } | |
214 | |
// Tears down the decoder and deletes |this|. Must run on the child thread.
// NOTE: this blocks the child thread until the IO thread has removed the
// filter, guaranteeing no decode message can race with the teardown below.
void GpuJpegDecodeAccelerator::Destroy() {
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  // We cannot destroy the JDA before the IO thread message filter is
  // removed however, since we cannot service incoming messages with JDA gone.
  // We cannot simply check for existence of JDA on IO thread though, because
  // we don't want to synchronize the IO thread with the ChildThread.
  // So we have to wait for the RemoveFilter callback here instead and remove
  // the JDA after it arrives and before returning.
  if (filter_.get()) {
    channel_->RemoveFilter(filter_.get());
    filter_removed_.Wait();  // Signaled by OnFilterRemoved() on the IO thread.
  }

  channel_->RemoveRoute(host_route_id_);
  channel_->ReleaseJpegDecoder(host_route_id_);
  jpeg_decode_accelerator_.reset();

  // Self-deletion: this is the only destruction path (see the
  // base::DefaultDeleter specialization at the top of this file).
  delete this;
}
234 | |
235 bool GpuJpegDecodeAccelerator::Send(IPC::Message* message) { | |
236 if (io_task_runner_->BelongsToCurrentThread()) | |
237 return filter_->SendOnIOThread(message); | |
238 return channel_->Send(message); | |
239 } | |
240 | |
241 } // namespace content | |
OLD | NEW |