OLD | NEW |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "chrome/gpu/arc_gpu_video_decode_accelerator.h" | 5 #include "chrome/gpu/arc_gpu_video_decode_accelerator.h" |
6 | 6 |
7 #include "base/callback_helpers.h" | 7 #include "base/callback_helpers.h" |
8 #include "base/logging.h" | 8 #include "base/logging.h" |
| 9 #include "base/numerics/safe_math.h" |
9 #include "base/run_loop.h" | 10 #include "base/run_loop.h" |
10 #include "content/public/gpu/gpu_video_decode_accelerator_factory.h" | 11 #include "content/public/gpu/gpu_video_decode_accelerator_factory.h" |
11 #include "media/base/video_frame.h" | 12 #include "media/base/video_frame.h" |
12 | 13 |
13 namespace chromeos { | 14 namespace chromeos { |
14 namespace arc { | 15 namespace arc { |
15 | 16 |
16 ArcGpuVideoDecodeAccelerator::InputRecord::InputRecord( | 17 ArcGpuVideoDecodeAccelerator::InputRecord::InputRecord( |
17 int32_t bitstream_buffer_id, | 18 int32_t bitstream_buffer_id, |
18 uint32_t buffer_index, | 19 uint32_t buffer_index, |
19 int64_t timestamp) | 20 int64_t timestamp) |
20 : bitstream_buffer_id(bitstream_buffer_id), | 21 : bitstream_buffer_id(bitstream_buffer_id), |
21 buffer_index(buffer_index), | 22 buffer_index(buffer_index), |
22 timestamp(timestamp) {} | 23 timestamp(timestamp) {} |
23 | 24 |
24 ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo() | 25 ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo() = default; |
25 : offset(0), length(0) {} | |
26 | 26 |
27 ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo( | 27 ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo( |
28 InputBufferInfo&& other) | 28 InputBufferInfo&& other) = default; |
29 : handle(std::move(other.handle)), | |
30 offset(other.offset), | |
31 length(other.length) {} | |
32 | 29 |
33 ArcGpuVideoDecodeAccelerator::InputBufferInfo::~InputBufferInfo() {} | 30 ArcGpuVideoDecodeAccelerator::InputBufferInfo::~InputBufferInfo() = default; |
| 31 |
| 32 ArcGpuVideoDecodeAccelerator::OutputBufferInfo::OutputBufferInfo() = default; |
| 33 |
| 34 ArcGpuVideoDecodeAccelerator::OutputBufferInfo::OutputBufferInfo( |
| 35 OutputBufferInfo&& other) = default; |
| 36 |
| 37 ArcGpuVideoDecodeAccelerator::OutputBufferInfo::~OutputBufferInfo() = default; |
34 | 38 |
35 ArcGpuVideoDecodeAccelerator::ArcGpuVideoDecodeAccelerator() | 39 ArcGpuVideoDecodeAccelerator::ArcGpuVideoDecodeAccelerator() |
36 : arc_client_(nullptr), | 40 : arc_client_(nullptr), |
37 next_bitstream_buffer_id_(0), | 41 next_bitstream_buffer_id_(0), |
| 42 output_pixel_format_(media::PIXEL_FORMAT_UNKNOWN), |
38 output_buffer_size_(0) {} | 43 output_buffer_size_(0) {} |
39 | 44 |
40 ArcGpuVideoDecodeAccelerator::~ArcGpuVideoDecodeAccelerator() {} | 45 ArcGpuVideoDecodeAccelerator::~ArcGpuVideoDecodeAccelerator() {} |
41 | 46 |
42 namespace { | 47 namespace { |
43 | 48 |
44 // An arbitrarily chosen limit on the number of buffers. The number of | 49 // An arbitrarily chosen limit on the number of buffers. The number of |
45 // buffers used is requested from the untrusted client side. | 50 // buffers used is requested from the untrusted client side. |
46 const size_t kMaxBufferCount = 128; | 51 const size_t kMaxBufferCount = 128; |
47 | 52 |
(...skipping 98 matching lines...) |
146 if (!ValidatePortAndIndex(port, index)) { | 151 if (!ValidatePortAndIndex(port, index)) { |
147 arc_client_->OnError(INVALID_ARGUMENT); | 152 arc_client_->OnError(INVALID_ARGUMENT); |
148 return; | 153 return; |
149 } | 154 } |
150 InputBufferInfo* input_info = &input_buffer_info_[index]; | 155 InputBufferInfo* input_info = &input_buffer_info_[index]; |
151 input_info->handle = std::move(ashmem_fd); | 156 input_info->handle = std::move(ashmem_fd); |
152 input_info->offset = offset; | 157 input_info->offset = offset; |
153 input_info->length = length; | 158 input_info->length = length; |
154 } | 159 } |
155 | 160 |
| 161 bool ArcGpuVideoDecodeAccelerator::VerifyStride(const base::ScopedFD& dmabuf_fd, |
| 162 int32_t stride) const { |
| 163 off_t size = lseek(dmabuf_fd.get(), 0, SEEK_END); |
| 164 lseek(dmabuf_fd.get(), 0, SEEK_SET); |
| 165 |
| 166 if (size < 0) { |
| 167 DPLOG(ERROR) << "Failed to get the size of the dmabuf"; |
| 168 return false; |
| 169 } |
| 170 |
| 171 int height = coded_size_.height(); |
| 172 switch (output_pixel_format_) { |
| 173 case media::PIXEL_FORMAT_I420: |
| 174 case media::PIXEL_FORMAT_YV12: |
| 175 case media::PIXEL_FORMAT_NV12: |
| 176 case media::PIXEL_FORMAT_NV21: |
| 177 // Adjust the height to account for the UV plane(s). |
| 178 // The coded height should always be even, but for security reasons we |
| 179 // still round up here in case the VDA reports an incorrect value. |
| 180 height += (height + 1) / 2; |
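| // For example, NV12 at a coded height of 1080 has 1080 luma rows plus |
| // 540 chroma rows, so used_bytes below becomes 1620 * stride. |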
| 181 break; |
| 182 case media::PIXEL_FORMAT_ARGB: |
| 183 // No need to adjust height. |
| 184 break; |
| 185 default: |
| 186 DLOG(ERROR) << "Format not supported: " << output_pixel_format_; |
| 187 return false; |
| 188 } |
| 189 base::CheckedNumeric<off_t> used_bytes(height); |
| 190 used_bytes *= stride; |
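| // CheckedNumeric catches overflow in height * stride (stride comes from |
| // the untrusted client), which would otherwise wrap around and slip |
| // past the size check below. |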
| 191 |
| 192 if (stride < 0 || !used_bytes.IsValid() || used_bytes.ValueOrDie() > size) { |
| 193 DLOG(ERROR) << "Invalid stride: " << stride << ", height: " << height |
| 194 << ", size of dmabuf: " << size; |
| 195 return false; |
| 196 } |
| 197 |
| 198 return true; |
| 199 } |
| 200 |
156 void ArcGpuVideoDecodeAccelerator::BindDmabuf(PortType port, | 201 void ArcGpuVideoDecodeAccelerator::BindDmabuf(PortType port, |
157 uint32_t index, | 202 uint32_t index, |
158 base::ScopedFD dmabuf_fd) { | 203 base::ScopedFD dmabuf_fd, |
| 204 int32_t stride) { |
159 DCHECK(thread_checker_.CalledOnValidThread()); | 205 DCHECK(thread_checker_.CalledOnValidThread()); |
160 | 206 |
161 if (!vda_) { | 207 if (!vda_) { |
162 DLOG(ERROR) << "VDA not initialized"; | 208 DLOG(ERROR) << "VDA not initialized"; |
163 return; | 209 return; |
164 } | 210 } |
165 | 211 |
166 if (port != PORT_OUTPUT) { | 212 if (port != PORT_OUTPUT) { |
167 DLOG(ERROR) << "Dmabuf is only supported for input"; | 213 DLOG(ERROR) << "Dmabuf is only supported for input"; |
168 arc_client_->OnError(INVALID_ARGUMENT); | 214 arc_client_->OnError(INVALID_ARGUMENT); |
169 return; | 215 return; |
170 } | 216 } |
171 if (!ValidatePortAndIndex(port, index)) { | 217 if (!ValidatePortAndIndex(port, index)) { |
172 arc_client_->OnError(INVALID_ARGUMENT); | 218 arc_client_->OnError(INVALID_ARGUMENT); |
173 return; | 219 return; |
174 } | 220 } |
175 buffers_pending_import_[index] = std::move(dmabuf_fd); | 221 if (!VerifyStride(dmabuf_fd, stride)) { |
| 222 arc_client_->OnError(INVALID_ARGUMENT); |
| 223 return; |
| 224 } |
| 225 |
| 226 OutputBufferInfo& info = buffers_pending_import_[index]; |
| 227 info.handle = std::move(dmabuf_fd); |
| 228 info.stride = stride; |
176 } | 229 } |
177 | 230 |
178 void ArcGpuVideoDecodeAccelerator::UseBuffer(PortType port, | 231 void ArcGpuVideoDecodeAccelerator::UseBuffer(PortType port, |
179 uint32_t index, | 232 uint32_t index, |
180 const BufferMetadata& metadata) { | 233 const BufferMetadata& metadata) { |
181 DVLOG(5) << "UseBuffer(port=" << port << ", index=" << index | 234 DVLOG(5) << "UseBuffer(port=" << port << ", index=" << index |
182 << ", metadata=(bytes_used=" << metadata.bytes_used | 235 << ", metadata=(bytes_used=" << metadata.bytes_used |
183 << ", timestamp=" << metadata.timestamp << ")"; | 236 << ", timestamp=" << metadata.timestamp << ")"; |
184 DCHECK(thread_checker_.CalledOnValidThread()); | 237 DCHECK(thread_checker_.CalledOnValidThread()); |
185 if (!vda_) { | 238 if (!vda_) { |
(...skipping 19 matching lines...) |
205 } | 258 } |
206 CreateInputRecord(bitstream_buffer_id, index, metadata.timestamp); | 259 CreateInputRecord(bitstream_buffer_id, index, metadata.timestamp); |
207 vda_->Decode(media::BitstreamBuffer( | 260 vda_->Decode(media::BitstreamBuffer( |
208 bitstream_buffer_id, base::SharedMemoryHandle(dup_fd, true), | 261 bitstream_buffer_id, base::SharedMemoryHandle(dup_fd, true), |
209 metadata.bytes_used, input_info->offset)); | 262 metadata.bytes_used, input_info->offset)); |
210 break; | 263 break; |
211 } | 264 } |
212 case PORT_OUTPUT: { | 265 case PORT_OUTPUT: { |
213 // is_valid() is true for the first time the buffer is passed to the VDA. | 266 // is_valid() is true for the first time the buffer is passed to the VDA. |
214 // In that case, VDA needs to import the buffer first. | 267 // In that case, VDA needs to import the buffer first. |
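| // Importing releases the fd, so is_valid() is false on later calls and |
| // the buffer is recycled via ReusePictureBuffer() instead. |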
215 if (buffers_pending_import_[index].is_valid()) { | 268 OutputBufferInfo& info = buffers_pending_import_[index]; |
| 269 if (info.handle.is_valid()) { |
216 gfx::GpuMemoryBufferHandle handle; | 270 gfx::GpuMemoryBufferHandle handle; |
217 #if defined(USE_OZONE) | 271 #if defined(USE_OZONE) |
218 handle.native_pixmap_handle.fd = base::FileDescriptor( | 272 handle.native_pixmap_handle.fd = |
219 buffers_pending_import_[index].release(), true); | 273 base::FileDescriptor(info.handle.release(), true); |
| 274 handle.native_pixmap_handle.stride = info.stride; |
220 #endif | 275 #endif |
221 vda_->ImportBufferForPicture(index, {handle}); | 276 vda_->ImportBufferForPicture(index, {handle}); |
222 } else { | 277 } else { |
223 vda_->ReusePictureBuffer(index); | 278 vda_->ReusePictureBuffer(index); |
224 } | 279 } |
225 break; | 280 break; |
226 } | 281 } |
227 default: | 282 default: |
228 NOTREACHED(); | 283 NOTREACHED(); |
229 } | 284 } |
(...skipping 20 matching lines...) |
250 void ArcGpuVideoDecodeAccelerator::ProvidePictureBuffers( | 305 void ArcGpuVideoDecodeAccelerator::ProvidePictureBuffers( |
251 uint32_t requested_num_of_buffers, | 306 uint32_t requested_num_of_buffers, |
252 uint32_t textures_per_buffer, | 307 uint32_t textures_per_buffer, |
253 const gfx::Size& dimensions, | 308 const gfx::Size& dimensions, |
254 uint32_t texture_target) { | 309 uint32_t texture_target) { |
255 DVLOG(5) << "ProvidePictureBuffers(" | 310 DVLOG(5) << "ProvidePictureBuffers(" |
256 << "requested_num_of_buffers=" << requested_num_of_buffers | 311 << "requested_num_of_buffers=" << requested_num_of_buffers |
257 << ", dimensions=" << dimensions.ToString() << ")"; | 312 << ", dimensions=" << dimensions.ToString() << ")"; |
258 DCHECK(thread_checker_.CalledOnValidThread()); | 313 DCHECK(thread_checker_.CalledOnValidThread()); |
259 coded_size_ = dimensions; | 314 coded_size_ = dimensions; |
| 315 output_pixel_format_ = vda_->GetOutputFormat(); |
260 | 316 |
261 VideoFormat video_format; | 317 VideoFormat video_format; |
262 media::VideoPixelFormat output_format = vda_->GetOutputFormat(); | 318 switch (output_pixel_format_) { |
263 switch (output_format) { | |
264 case media::PIXEL_FORMAT_I420: | 319 case media::PIXEL_FORMAT_I420: |
265 case media::PIXEL_FORMAT_YV12: | 320 case media::PIXEL_FORMAT_YV12: |
266 case media::PIXEL_FORMAT_NV12: | 321 case media::PIXEL_FORMAT_NV12: |
267 case media::PIXEL_FORMAT_NV21: | 322 case media::PIXEL_FORMAT_NV21: |
268 // HAL_PIXEL_FORMAT_YCbCr_420_888 is the flexible pixel format in Android | 323 // HAL_PIXEL_FORMAT_YCbCr_420_888 is the flexible pixel format in Android |
269 // which handles all 420 formats, with both orderings of chroma (CbCr and | 324 // which handles all 420 formats, with both orderings of chroma (CbCr and |
270 // CrCb) as well as planar and semi-planar layouts. | 325 // CrCb) as well as planar and semi-planar layouts. |
271 video_format.pixel_format = HAL_PIXEL_FORMAT_YCbCr_420_888; | 326 video_format.pixel_format = HAL_PIXEL_FORMAT_YCbCr_420_888; |
272 break; | 327 break; |
273 case media::PIXEL_FORMAT_ARGB: | 328 case media::PIXEL_FORMAT_ARGB: |
274 video_format.pixel_format = HAL_PIXEL_FORMAT_BGRA_8888; | 329 video_format.pixel_format = HAL_PIXEL_FORMAT_BGRA_8888; |
275 break; | 330 break; |
276 default: | 331 default: |
277 DLOG(ERROR) << "Format not supported: " << output_format; | 332 DLOG(ERROR) << "Format not supported: " << output_pixel_format_; |
278 arc_client_->OnError(PLATFORM_FAILURE); | 333 arc_client_->OnError(PLATFORM_FAILURE); |
279 return; | 334 return; |
280 } | 335 } |
281 video_format.buffer_size = | 336 video_format.buffer_size = |
282 media::VideoFrame::AllocationSize(output_format, coded_size_); | 337 media::VideoFrame::AllocationSize(output_pixel_format_, coded_size_); |
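| // For the 4:2:0 formats above this is roughly |
| // coded_width * coded_height * 3 / 2 bytes. |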
283 output_buffer_size_ = video_format.buffer_size; | 338 output_buffer_size_ = video_format.buffer_size; |
284 video_format.min_num_buffers = requested_num_of_buffers; | 339 video_format.min_num_buffers = requested_num_of_buffers; |
285 video_format.coded_width = dimensions.width(); | 340 video_format.coded_width = dimensions.width(); |
286 video_format.coded_height = dimensions.height(); | 341 video_format.coded_height = dimensions.height(); |
287 // TODO(owenlin): How to get visible size? | 342 // TODO(owenlin): How to get visible size? |
288 video_format.crop_top = 0; | 343 video_format.crop_top = 0; |
289 video_format.crop_left = 0; | 344 video_format.crop_left = 0; |
290 video_format.crop_width = dimensions.width(); | 345 video_format.crop_width = dimensions.width(); |
291 video_format.crop_height = dimensions.height(); | 346 video_format.crop_height = dimensions.height(); |
292 arc_client_->OnOutputFormatChanged(video_format); | 347 arc_client_->OnOutputFormatChanged(video_format); |
(...skipping 90 matching lines...) |
383 ArcGpuVideoDecodeAccelerator::InputRecord* | 438 ArcGpuVideoDecodeAccelerator::InputRecord* |
384 ArcGpuVideoDecodeAccelerator::FindInputRecord(int32_t bitstream_buffer_id) { | 439 ArcGpuVideoDecodeAccelerator::FindInputRecord(int32_t bitstream_buffer_id) { |
385 for (auto& record : input_records_) { | 440 for (auto& record : input_records_) { |
386 if (record.bitstream_buffer_id == bitstream_buffer_id) | 441 if (record.bitstream_buffer_id == bitstream_buffer_id) |
387 return &record; | 442 return &record; |
388 } | 443 } |
389 return nullptr; | 444 return nullptr; |
390 } | 445 } |
391 | 446 |
392 bool ArcGpuVideoDecodeAccelerator::ValidatePortAndIndex(PortType port, | 447 bool ArcGpuVideoDecodeAccelerator::ValidatePortAndIndex(PortType port, |
393 uint32_t index) { | 448 uint32_t index) const { |
394 switch (port) { | 449 switch (port) { |
395 case PORT_INPUT: | 450 case PORT_INPUT: |
396 if (index >= input_buffer_info_.size()) { | 451 if (index >= input_buffer_info_.size()) { |
397 DLOG(ERROR) << "Invalid index: " << index; | 452 DLOG(ERROR) << "Invalid index: " << index; |
398 return false; | 453 return false; |
399 } | 454 } |
400 return true; | 455 return true; |
401 case PORT_OUTPUT: | 456 case PORT_OUTPUT: |
402 if (index >= buffers_pending_import_.size()) { | 457 if (index >= buffers_pending_import_.size()) { |
403 DLOG(ERROR) << "Invalid index: " << index; | 458 DLOG(ERROR) << "Invalid index: " << index; |
404 return false; | 459 return false; |
405 } | 460 } |
406 return true; | 461 return true; |
407 default: | 462 default: |
408 DLOG(ERROR) << "Invalid port: " << port; | 463 DLOG(ERROR) << "Invalid port: " << port; |
409 return false; | 464 return false; |
410 } | 465 } |
411 } | 466 } |
412 | 467 |
413 } // namespace arc | 468 } // namespace arc |
414 } // namespace chromeos | 469 } // namespace chromeos |