OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/gpu/v4l2_slice_video_decode_accelerator.h" | 5 #include "media/gpu/v4l2_slice_video_decode_accelerator.h" |
6 | 6 |
7 #include <errno.h> | 7 #include <errno.h> |
8 #include <fcntl.h> | 8 #include <fcntl.h> |
9 #include <linux/videodev2.h> | 9 #include <linux/videodev2.h> |
10 #include <poll.h> | 10 #include <poll.h> |
(...skipping 208 matching lines...)
219 | 219 |
220 V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::~EGLSyncKHRRef() { | 220 V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::~EGLSyncKHRRef() { |
221 // We don't check for eglDestroySyncKHR failures, because if we get here | 221 // We don't check for eglDestroySyncKHR failures, because if we get here |
222 // with a valid sync object, something went wrong and we are getting | 222 // with a valid sync object, something went wrong and we are getting |
223 // destroyed anyway. | 223 // destroyed anyway. |
224 if (egl_sync != EGL_NO_SYNC_KHR) | 224 if (egl_sync != EGL_NO_SYNC_KHR) |
225 eglDestroySyncKHR(egl_display, egl_sync); | 225 eglDestroySyncKHR(egl_display, egl_sync); |
226 } | 226 } |
227 | 227 |
228 struct V4L2SliceVideoDecodeAccelerator::PictureRecord { | 228 struct V4L2SliceVideoDecodeAccelerator::PictureRecord { |
229 PictureRecord(bool cleared, const media::Picture& picture); | 229 PictureRecord(bool cleared, const Picture& picture); |
230 ~PictureRecord(); | 230 ~PictureRecord(); |
231 bool cleared; // Whether the texture is cleared and safe to render from. | 231 bool cleared; // Whether the texture is cleared and safe to render from. |
232 media::Picture picture; // The decoded picture. | 232 Picture picture; // The decoded picture. |
233 }; | 233 }; |
234 | 234 |
235 V4L2SliceVideoDecodeAccelerator::PictureRecord::PictureRecord( | 235 V4L2SliceVideoDecodeAccelerator::PictureRecord::PictureRecord( |
236 bool cleared, | 236 bool cleared, |
237 const media::Picture& picture) | 237 const Picture& picture) |
238 : cleared(cleared), picture(picture) {} | 238 : cleared(cleared), picture(picture) {} |
239 | 239 |
240 V4L2SliceVideoDecodeAccelerator::PictureRecord::~PictureRecord() {} | 240 V4L2SliceVideoDecodeAccelerator::PictureRecord::~PictureRecord() {} |
241 | 241 |
242 class V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator | 242 class V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator |
243 : public H264Decoder::H264Accelerator { | 243 : public H264Decoder::H264Accelerator { |
244 public: | 244 public: |
245 V4L2H264Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec); | 245 V4L2H264Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec); |
246 ~V4L2H264Accelerator() override; | 246 ~V4L2H264Accelerator() override; |
247 | 247 |
248 // H264Decoder::H264Accelerator implementation. | 248 // H264Decoder::H264Accelerator implementation. |
249 scoped_refptr<H264Picture> CreateH264Picture() override; | 249 scoped_refptr<H264Picture> CreateH264Picture() override; |
250 | 250 |
251 bool SubmitFrameMetadata(const media::H264SPS* sps, | 251 bool SubmitFrameMetadata(const H264SPS* sps, |
252 const media::H264PPS* pps, | 252 const H264PPS* pps, |
253 const H264DPB& dpb, | 253 const H264DPB& dpb, |
254 const H264Picture::Vector& ref_pic_listp0, | 254 const H264Picture::Vector& ref_pic_listp0, |
255 const H264Picture::Vector& ref_pic_listb0, | 255 const H264Picture::Vector& ref_pic_listb0, |
256 const H264Picture::Vector& ref_pic_listb1, | 256 const H264Picture::Vector& ref_pic_listb1, |
257 const scoped_refptr<H264Picture>& pic) override; | 257 const scoped_refptr<H264Picture>& pic) override; |
258 | 258 |
259 bool SubmitSlice(const media::H264PPS* pps, | 259 bool SubmitSlice(const H264PPS* pps, |
260 const media::H264SliceHeader* slice_hdr, | 260 const H264SliceHeader* slice_hdr, |
261 const H264Picture::Vector& ref_pic_list0, | 261 const H264Picture::Vector& ref_pic_list0, |
262 const H264Picture::Vector& ref_pic_list1, | 262 const H264Picture::Vector& ref_pic_list1, |
263 const scoped_refptr<H264Picture>& pic, | 263 const scoped_refptr<H264Picture>& pic, |
264 const uint8_t* data, | 264 const uint8_t* data, |
265 size_t size) override; | 265 size_t size) override; |
266 | 266 |
267 bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override; | 267 bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override; |
268 bool OutputPicture(const scoped_refptr<H264Picture>& pic) override; | 268 bool OutputPicture(const scoped_refptr<H264Picture>& pic) override; |
269 | 269 |
270 void Reset() override; | 270 void Reset() override; |
(...skipping 25 matching lines...)
296 class V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator | 296 class V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator |
297 : public VP8Decoder::VP8Accelerator { | 297 : public VP8Decoder::VP8Accelerator { |
298 public: | 298 public: |
299 V4L2VP8Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec); | 299 V4L2VP8Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec); |
300 ~V4L2VP8Accelerator() override; | 300 ~V4L2VP8Accelerator() override; |
301 | 301 |
302 // VP8Decoder::VP8Accelerator implementation. | 302 // VP8Decoder::VP8Accelerator implementation. |
303 scoped_refptr<VP8Picture> CreateVP8Picture() override; | 303 scoped_refptr<VP8Picture> CreateVP8Picture() override; |
304 | 304 |
305 bool SubmitDecode(const scoped_refptr<VP8Picture>& pic, | 305 bool SubmitDecode(const scoped_refptr<VP8Picture>& pic, |
306 const media::Vp8FrameHeader* frame_hdr, | 306 const Vp8FrameHeader* frame_hdr, |
307 const scoped_refptr<VP8Picture>& last_frame, | 307 const scoped_refptr<VP8Picture>& last_frame, |
308 const scoped_refptr<VP8Picture>& golden_frame, | 308 const scoped_refptr<VP8Picture>& golden_frame, |
309 const scoped_refptr<VP8Picture>& alt_frame) override; | 309 const scoped_refptr<VP8Picture>& alt_frame) override; |
310 | 310 |
311 bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override; | 311 bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override; |
312 | 312 |
313 private: | 313 private: |
314 scoped_refptr<V4L2DecodeSurface> VP8PictureToV4L2DecodeSurface( | 314 scoped_refptr<V4L2DecodeSurface> VP8PictureToV4L2DecodeSurface( |
315 const scoped_refptr<VP8Picture>& pic); | 315 const scoped_refptr<VP8Picture>& pic); |
316 | 316 |
(...skipping 68 matching lines...)
385 : input_planes_count_(0), | 385 : input_planes_count_(0), |
386 output_planes_count_(0), | 386 output_planes_count_(0), |
387 child_task_runner_(base::ThreadTaskRunnerHandle::Get()), | 387 child_task_runner_(base::ThreadTaskRunnerHandle::Get()), |
388 device_(device), | 388 device_(device), |
389 decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"), | 389 decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"), |
390 device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"), | 390 device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"), |
391 input_streamon_(false), | 391 input_streamon_(false), |
392 input_buffer_queued_count_(0), | 392 input_buffer_queued_count_(0), |
393 output_streamon_(false), | 393 output_streamon_(false), |
394 output_buffer_queued_count_(0), | 394 output_buffer_queued_count_(0), |
395 video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN), | 395 video_profile_(VIDEO_CODEC_PROFILE_UNKNOWN), |
396 output_format_fourcc_(0), | 396 output_format_fourcc_(0), |
397 state_(kUninitialized), | 397 state_(kUninitialized), |
398 output_mode_(Config::OutputMode::ALLOCATE), | 398 output_mode_(Config::OutputMode::ALLOCATE), |
399 decoder_flushing_(false), | 399 decoder_flushing_(false), |
400 decoder_resetting_(false), | 400 decoder_resetting_(false), |
401 surface_set_change_pending_(false), | 401 surface_set_change_pending_(false), |
402 picture_clearing_count_(0), | 402 picture_clearing_count_(0), |
403 egl_display_(egl_display), | 403 egl_display_(egl_display), |
404 get_gl_context_cb_(get_gl_context_cb), | 404 get_gl_context_cb_(get_gl_context_cb), |
405 make_context_current_cb_(make_context_current_cb), | 405 make_context_current_cb_(make_context_current_cb), |
(...skipping 57 matching lines...)
463 // TryToSetupDecodeOnSeparateThread(), use the main thread/client for | 463 // TryToSetupDecodeOnSeparateThread(), use the main thread/client for |
464 // decode tasks. | 464 // decode tasks. |
465 if (!decode_task_runner_) { | 465 if (!decode_task_runner_) { |
466 decode_task_runner_ = child_task_runner_; | 466 decode_task_runner_ = child_task_runner_; |
467 DCHECK(!decode_client_); | 467 DCHECK(!decode_client_); |
468 decode_client_ = client_; | 468 decode_client_ = client_; |
469 } | 469 } |
470 | 470 |
471 video_profile_ = config.profile; | 471 video_profile_ = config.profile; |
472 | 472 |
473 if (video_profile_ >= media::H264PROFILE_MIN && | 473 if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) { |
474 video_profile_ <= media::H264PROFILE_MAX) { | |
475 h264_accelerator_.reset(new V4L2H264Accelerator(this)); | 474 h264_accelerator_.reset(new V4L2H264Accelerator(this)); |
476 decoder_.reset(new H264Decoder(h264_accelerator_.get())); | 475 decoder_.reset(new H264Decoder(h264_accelerator_.get())); |
477 } else if (video_profile_ >= media::VP8PROFILE_MIN && | 476 } else if (video_profile_ >= VP8PROFILE_MIN && |
478 video_profile_ <= media::VP8PROFILE_MAX) { | 477 video_profile_ <= VP8PROFILE_MAX) { |
479 vp8_accelerator_.reset(new V4L2VP8Accelerator(this)); | 478 vp8_accelerator_.reset(new V4L2VP8Accelerator(this)); |
480 decoder_.reset(new VP8Decoder(vp8_accelerator_.get())); | 479 decoder_.reset(new VP8Decoder(vp8_accelerator_.get())); |
481 } else { | 480 } else { |
482 NOTREACHED() << "Unsupported profile " << video_profile_; | 481 NOTREACHED() << "Unsupported profile " << video_profile_; |
483 return false; | 482 return false; |
484 } | 483 } |
485 | 484 |
486 // TODO(posciak): This needs to be queried once supported. | 485 // TODO(posciak): This needs to be queried once supported. |
487 input_planes_count_ = 1; | 486 input_planes_count_ = 1; |
488 output_planes_count_ = 1; | 487 output_planes_count_ = 1; |
(...skipping 745 matching lines...)
1234 // Drop all surfaces that were awaiting decode before being displayed, | 1233 // Drop all surfaces that were awaiting decode before being displayed, |
1235 // since we've just cancelled all outstanding decodes. | 1234 // since we've just cancelled all outstanding decodes. |
1236 while (!decoder_display_queue_.empty()) | 1235 while (!decoder_display_queue_.empty()) |
1237 decoder_display_queue_.pop(); | 1236 decoder_display_queue_.pop(); |
1238 | 1237 |
1239 DVLOGF(3) << "Device poll stopped"; | 1238 DVLOGF(3) << "Device poll stopped"; |
1240 return true; | 1239 return true; |
1241 } | 1240 } |
1242 | 1241 |
1243 void V4L2SliceVideoDecodeAccelerator::Decode( | 1242 void V4L2SliceVideoDecodeAccelerator::Decode( |
1244 const media::BitstreamBuffer& bitstream_buffer) { | 1243 const BitstreamBuffer& bitstream_buffer) { |
1245 DVLOGF(3) << "input_id=" << bitstream_buffer.id() | 1244 DVLOGF(3) << "input_id=" << bitstream_buffer.id() |
1246 << ", size=" << bitstream_buffer.size(); | 1245 << ", size=" << bitstream_buffer.size(); |
1247 DCHECK(decode_task_runner_->BelongsToCurrentThread()); | 1246 DCHECK(decode_task_runner_->BelongsToCurrentThread()); |
1248 | 1247 |
1249 if (bitstream_buffer.id() < 0) { | 1248 if (bitstream_buffer.id() < 0) { |
1250 LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id(); | 1249 LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id(); |
1251 if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle())) | 1250 if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle())) |
1252 base::SharedMemory::CloseHandle(bitstream_buffer.handle()); | 1251 base::SharedMemory::CloseHandle(bitstream_buffer.handle()); |
1253 NOTIFY_ERROR(INVALID_ARGUMENT); | 1252 NOTIFY_ERROR(INVALID_ARGUMENT); |
1254 return; | 1253 return; |
1255 } | 1254 } |
1256 | 1255 |
1257 decoder_thread_task_runner_->PostTask( | 1256 decoder_thread_task_runner_->PostTask( |
1258 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeTask, | 1257 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeTask, |
1259 base::Unretained(this), bitstream_buffer)); | 1258 base::Unretained(this), bitstream_buffer)); |
1260 } | 1259 } |
1261 | 1260 |
1262 void V4L2SliceVideoDecodeAccelerator::DecodeTask( | 1261 void V4L2SliceVideoDecodeAccelerator::DecodeTask( |
1263 const media::BitstreamBuffer& bitstream_buffer) { | 1262 const BitstreamBuffer& bitstream_buffer) { |
1264 DVLOGF(3) << "input_id=" << bitstream_buffer.id() | 1263 DVLOGF(3) << "input_id=" << bitstream_buffer.id() |
1265 << " size=" << bitstream_buffer.size(); | 1264 << " size=" << bitstream_buffer.size(); |
1266 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | 1265 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); |
1267 | 1266 |
1268 std::unique_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef( | 1267 std::unique_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef( |
1269 decode_client_, decode_task_runner_, | 1268 decode_client_, decode_task_runner_, |
1270 new SharedMemoryRegion(bitstream_buffer, true), bitstream_buffer.id())); | 1269 new SharedMemoryRegion(bitstream_buffer, true), bitstream_buffer.id())); |
1271 | 1270 |
1272 // Skip empty buffer. | 1271 // Skip empty buffer. |
1273 if (bitstream_buffer.size() == 0) | 1272 if (bitstream_buffer.size() == 0) |
(...skipping 214 matching lines...)
1488 memset(&reqbufs, 0, sizeof(reqbufs)); | 1487 memset(&reqbufs, 0, sizeof(reqbufs)); |
1489 reqbufs.count = 0; | 1488 reqbufs.count = 0; |
1490 reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | 1489 reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
1491 reqbufs.memory = V4L2_MEMORY_MMAP; | 1490 reqbufs.memory = V4L2_MEMORY_MMAP; |
1492 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs); | 1491 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs); |
1493 | 1492 |
1494 return true; | 1493 return true; |
1495 } | 1494 } |
1496 | 1495 |
1497 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers( | 1496 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers( |
1498 const std::vector<media::PictureBuffer>& buffers) { | 1497 const std::vector<PictureBuffer>& buffers) { |
1499 DVLOGF(3); | 1498 DVLOGF(3); |
1500 DCHECK(child_task_runner_->BelongsToCurrentThread()); | 1499 DCHECK(child_task_runner_->BelongsToCurrentThread()); |
1501 | 1500 |
1502 decoder_thread_task_runner_->PostTask( | 1501 decoder_thread_task_runner_->PostTask( |
1503 FROM_HERE, | 1502 FROM_HERE, |
1504 base::Bind(&V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask, | 1503 base::Bind(&V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask, |
1505 base::Unretained(this), buffers)); | 1504 base::Unretained(this), buffers)); |
1506 } | 1505 } |
1507 | 1506 |
1508 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask( | 1507 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask( |
1509 const std::vector<media::PictureBuffer>& buffers) { | 1508 const std::vector<PictureBuffer>& buffers) { |
1510 DVLOGF(3); | 1509 DVLOGF(3); |
1511 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | 1510 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); |
1512 DCHECK_EQ(state_, kAwaitingPictureBuffers); | 1511 DCHECK_EQ(state_, kAwaitingPictureBuffers); |
1513 | 1512 |
1514 const uint32_t req_buffer_count = decoder_->GetRequiredNumOfPictures(); | 1513 const uint32_t req_buffer_count = decoder_->GetRequiredNumOfPictures(); |
1515 | 1514 |
1516 if (buffers.size() < req_buffer_count) { | 1515 if (buffers.size() < req_buffer_count) { |
1517 DLOG(ERROR) << "Failed to provide requested picture buffers. " | 1516 DLOG(ERROR) << "Failed to provide requested picture buffers. " |
1518 << "(Got " << buffers.size() | 1517 << "(Got " << buffers.size() |
1519 << ", requested " << req_buffer_count << ")"; | 1518 << ", requested " << req_buffer_count << ")"; |
(...skipping 529 matching lines...)
2049 entry.frame_num = pic->frame_num; | 2048 entry.frame_num = pic->frame_num; |
2050 entry.pic_num = pic->pic_num; | 2049 entry.pic_num = pic->pic_num; |
2051 entry.top_field_order_cnt = pic->top_field_order_cnt; | 2050 entry.top_field_order_cnt = pic->top_field_order_cnt; |
2052 entry.bottom_field_order_cnt = pic->bottom_field_order_cnt; | 2051 entry.bottom_field_order_cnt = pic->bottom_field_order_cnt; |
2053 entry.flags = (pic->ref ? V4L2_H264_DPB_ENTRY_FLAG_ACTIVE : 0) | | 2052 entry.flags = (pic->ref ? V4L2_H264_DPB_ENTRY_FLAG_ACTIVE : 0) | |
2054 (pic->long_term ? V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM : 0); | 2053 (pic->long_term ? V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM : 0); |
2055 } | 2054 } |
2056 } | 2055 } |
2057 | 2056 |
2058 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitFrameMetadata( | 2057 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitFrameMetadata( |
2059 const media::H264SPS* sps, | 2058 const H264SPS* sps, |
2060 const media::H264PPS* pps, | 2059 const H264PPS* pps, |
2061 const H264DPB& dpb, | 2060 const H264DPB& dpb, |
2062 const H264Picture::Vector& ref_pic_listp0, | 2061 const H264Picture::Vector& ref_pic_listp0, |
2063 const H264Picture::Vector& ref_pic_listb0, | 2062 const H264Picture::Vector& ref_pic_listb0, |
2064 const H264Picture::Vector& ref_pic_listb1, | 2063 const H264Picture::Vector& ref_pic_listb1, |
2065 const scoped_refptr<H264Picture>& pic) { | 2064 const scoped_refptr<H264Picture>& pic) { |
2066 struct v4l2_ext_control ctrl; | 2065 struct v4l2_ext_control ctrl; |
2067 std::vector<struct v4l2_ext_control> ctrls; | 2066 std::vector<struct v4l2_ext_control> ctrls; |
2068 | 2067 |
2069 struct v4l2_ctrl_h264_sps v4l2_sps; | 2068 struct v4l2_ctrl_h264_sps v4l2_sps; |
2070 memset(&v4l2_sps, 0, sizeof(v4l2_sps)); | 2069 memset(&v4l2_sps, 0, sizeof(v4l2_sps)); |
(...skipping 137 matching lines...)
2208 v4l2_decode_param_.ref_pic_list_b1); | 2207 v4l2_decode_param_.ref_pic_list_b1); |
2209 | 2208 |
2210 std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces; | 2209 std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces; |
2211 H264DPBToV4L2DPB(dpb, &ref_surfaces); | 2210 H264DPBToV4L2DPB(dpb, &ref_surfaces); |
2212 dec_surface->SetReferenceSurfaces(ref_surfaces); | 2211 dec_surface->SetReferenceSurfaces(ref_surfaces); |
2213 | 2212 |
2214 return true; | 2213 return true; |
2215 } | 2214 } |
2216 | 2215 |
2217 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitSlice( | 2216 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitSlice( |
2218 const media::H264PPS* pps, | 2217 const H264PPS* pps, |
2219 const media::H264SliceHeader* slice_hdr, | 2218 const H264SliceHeader* slice_hdr, |
2220 const H264Picture::Vector& ref_pic_list0, | 2219 const H264Picture::Vector& ref_pic_list0, |
2221 const H264Picture::Vector& ref_pic_list1, | 2220 const H264Picture::Vector& ref_pic_list1, |
2222 const scoped_refptr<H264Picture>& pic, | 2221 const scoped_refptr<H264Picture>& pic, |
2223 const uint8_t* data, | 2222 const uint8_t* data, |
2224 size_t size) { | 2223 size_t size) { |
2225 if (num_slices_ == kMaxSlices) { | 2224 if (num_slices_ == kMaxSlices) { |
2226 LOGF(ERROR) << "Over limit of supported slices per frame"; | 2225 LOGF(ERROR) << "Over limit of supported slices per frame"; |
2227 return false; | 2226 return false; |
2228 } | 2227 } |
2229 | 2228 |
(...skipping 209 matching lines...)
2439 } | 2438 } |
2440 | 2439 |
2441 #define ARRAY_MEMCPY_CHECKED(to, from) \ | 2440 #define ARRAY_MEMCPY_CHECKED(to, from) \ |
2442 do { \ | 2441 do { \ |
2443 static_assert(sizeof(to) == sizeof(from), \ | 2442 static_assert(sizeof(to) == sizeof(from), \ |
2444 #from " and " #to " arrays must be of same size"); \ | 2443 #from " and " #to " arrays must be of same size"); \ |
2445 memcpy(to, from, sizeof(to)); \ | 2444 memcpy(to, from, sizeof(to)); \ |
2446 } while (0) | 2445 } while (0) |
2447 | 2446 |
2448 static void FillV4L2SegmentationHeader( | 2447 static void FillV4L2SegmentationHeader( |
2449 const media::Vp8SegmentationHeader& vp8_sgmnt_hdr, | 2448 const Vp8SegmentationHeader& vp8_sgmnt_hdr, |
2450 struct v4l2_vp8_sgmnt_hdr* v4l2_sgmnt_hdr) { | 2449 struct v4l2_vp8_sgmnt_hdr* v4l2_sgmnt_hdr) { |
2451 #define SET_V4L2_SGMNT_HDR_FLAG_IF(cond, flag) \ | 2450 #define SET_V4L2_SGMNT_HDR_FLAG_IF(cond, flag) \ |
2452 v4l2_sgmnt_hdr->flags |= ((vp8_sgmnt_hdr.cond) ? (flag) : 0) | 2451 v4l2_sgmnt_hdr->flags |= ((vp8_sgmnt_hdr.cond) ? (flag) : 0) |
2453 SET_V4L2_SGMNT_HDR_FLAG_IF(segmentation_enabled, | 2452 SET_V4L2_SGMNT_HDR_FLAG_IF(segmentation_enabled, |
2454 V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED); | 2453 V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED); |
2455 SET_V4L2_SGMNT_HDR_FLAG_IF(update_mb_segmentation_map, | 2454 SET_V4L2_SGMNT_HDR_FLAG_IF(update_mb_segmentation_map, |
2456 V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP); | 2455 V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP); |
2457 SET_V4L2_SGMNT_HDR_FLAG_IF(update_segment_feature_data, | 2456 SET_V4L2_SGMNT_HDR_FLAG_IF(update_segment_feature_data, |
2458 V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA); | 2457 V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA); |
2459 #undef SET_V4L2_SPARM_FLAG_IF | 2458 #undef SET_V4L2_SPARM_FLAG_IF |
2460 v4l2_sgmnt_hdr->segment_feature_mode = vp8_sgmnt_hdr.segment_feature_mode; | 2459 v4l2_sgmnt_hdr->segment_feature_mode = vp8_sgmnt_hdr.segment_feature_mode; |
2461 | 2460 |
2462 ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->quant_update, | 2461 ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->quant_update, |
2463 vp8_sgmnt_hdr.quantizer_update_value); | 2462 vp8_sgmnt_hdr.quantizer_update_value); |
2464 ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->lf_update, | 2463 ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->lf_update, |
2465 vp8_sgmnt_hdr.lf_update_value); | 2464 vp8_sgmnt_hdr.lf_update_value); |
2466 ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->segment_probs, | 2465 ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->segment_probs, |
2467 vp8_sgmnt_hdr.segment_prob); | 2466 vp8_sgmnt_hdr.segment_prob); |
2468 } | 2467 } |
2469 | 2468 |
2470 static void FillV4L2LoopfilterHeader( | 2469 static void FillV4L2LoopfilterHeader( |
2471 const media::Vp8LoopFilterHeader& vp8_loopfilter_hdr, | 2470 const Vp8LoopFilterHeader& vp8_loopfilter_hdr, |
2472 struct v4l2_vp8_loopfilter_hdr* v4l2_lf_hdr) { | 2471 struct v4l2_vp8_loopfilter_hdr* v4l2_lf_hdr) { |
2473 #define SET_V4L2_LF_HDR_FLAG_IF(cond, flag) \ | 2472 #define SET_V4L2_LF_HDR_FLAG_IF(cond, flag) \ |
2474 v4l2_lf_hdr->flags |= ((vp8_loopfilter_hdr.cond) ? (flag) : 0) | 2473 v4l2_lf_hdr->flags |= ((vp8_loopfilter_hdr.cond) ? (flag) : 0) |
2475 SET_V4L2_LF_HDR_FLAG_IF(loop_filter_adj_enable, V4L2_VP8_LF_HDR_ADJ_ENABLE); | 2474 SET_V4L2_LF_HDR_FLAG_IF(loop_filter_adj_enable, V4L2_VP8_LF_HDR_ADJ_ENABLE); |
2476 SET_V4L2_LF_HDR_FLAG_IF(mode_ref_lf_delta_update, | 2475 SET_V4L2_LF_HDR_FLAG_IF(mode_ref_lf_delta_update, |
2477 V4L2_VP8_LF_HDR_DELTA_UPDATE); | 2476 V4L2_VP8_LF_HDR_DELTA_UPDATE); |
2478 #undef SET_V4L2_SGMNT_HDR_FLAG_IF | 2477 #undef SET_V4L2_SGMNT_HDR_FLAG_IF |
2479 | 2478 |
2480 #define LF_HDR_TO_V4L2_LF_HDR(a) v4l2_lf_hdr->a = vp8_loopfilter_hdr.a; | 2479 #define LF_HDR_TO_V4L2_LF_HDR(a) v4l2_lf_hdr->a = vp8_loopfilter_hdr.a; |
2481 LF_HDR_TO_V4L2_LF_HDR(type); | 2480 LF_HDR_TO_V4L2_LF_HDR(type); |
2482 LF_HDR_TO_V4L2_LF_HDR(level); | 2481 LF_HDR_TO_V4L2_LF_HDR(level); |
2483 LF_HDR_TO_V4L2_LF_HDR(sharpness_level); | 2482 LF_HDR_TO_V4L2_LF_HDR(sharpness_level); |
2484 #undef LF_HDR_TO_V4L2_LF_HDR | 2483 #undef LF_HDR_TO_V4L2_LF_HDR |
2485 | 2484 |
2486 ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->ref_frm_delta_magnitude, | 2485 ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->ref_frm_delta_magnitude, |
2487 vp8_loopfilter_hdr.ref_frame_delta); | 2486 vp8_loopfilter_hdr.ref_frame_delta); |
2488 ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->mb_mode_delta_magnitude, | 2487 ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->mb_mode_delta_magnitude, |
2489 vp8_loopfilter_hdr.mb_mode_delta); | 2488 vp8_loopfilter_hdr.mb_mode_delta); |
2490 } | 2489 } |
2491 | 2490 |
2492 static void FillV4L2QuantizationHeader( | 2491 static void FillV4L2QuantizationHeader( |
2493 const media::Vp8QuantizationHeader& vp8_quant_hdr, | 2492 const Vp8QuantizationHeader& vp8_quant_hdr, |
2494 struct v4l2_vp8_quantization_hdr* v4l2_quant_hdr) { | 2493 struct v4l2_vp8_quantization_hdr* v4l2_quant_hdr) { |
2495 v4l2_quant_hdr->y_ac_qi = vp8_quant_hdr.y_ac_qi; | 2494 v4l2_quant_hdr->y_ac_qi = vp8_quant_hdr.y_ac_qi; |
2496 v4l2_quant_hdr->y_dc_delta = vp8_quant_hdr.y_dc_delta; | 2495 v4l2_quant_hdr->y_dc_delta = vp8_quant_hdr.y_dc_delta; |
2497 v4l2_quant_hdr->y2_dc_delta = vp8_quant_hdr.y2_dc_delta; | 2496 v4l2_quant_hdr->y2_dc_delta = vp8_quant_hdr.y2_dc_delta; |
2498 v4l2_quant_hdr->y2_ac_delta = vp8_quant_hdr.y2_ac_delta; | 2497 v4l2_quant_hdr->y2_ac_delta = vp8_quant_hdr.y2_ac_delta; |
2499 v4l2_quant_hdr->uv_dc_delta = vp8_quant_hdr.uv_dc_delta; | 2498 v4l2_quant_hdr->uv_dc_delta = vp8_quant_hdr.uv_dc_delta; |
2500 v4l2_quant_hdr->uv_ac_delta = vp8_quant_hdr.uv_ac_delta; | 2499 v4l2_quant_hdr->uv_ac_delta = vp8_quant_hdr.uv_ac_delta; |
2501 } | 2500 } |
2502 | 2501 |
2503 static void FillV4L2EntropyHeader( | 2502 static void FillV4L2EntropyHeader( |
2504 const media::Vp8EntropyHeader& vp8_entropy_hdr, | 2503 const Vp8EntropyHeader& vp8_entropy_hdr, |
2505 struct v4l2_vp8_entropy_hdr* v4l2_entropy_hdr) { | 2504 struct v4l2_vp8_entropy_hdr* v4l2_entropy_hdr) { |
2506 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->coeff_probs, | 2505 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->coeff_probs, |
2507 vp8_entropy_hdr.coeff_probs); | 2506 vp8_entropy_hdr.coeff_probs); |
2508 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->y_mode_probs, | 2507 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->y_mode_probs, |
2509 vp8_entropy_hdr.y_mode_probs); | 2508 vp8_entropy_hdr.y_mode_probs); |
2510 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->uv_mode_probs, | 2509 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->uv_mode_probs, |
2511 vp8_entropy_hdr.uv_mode_probs); | 2510 vp8_entropy_hdr.uv_mode_probs); |
2512 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->mv_probs, vp8_entropy_hdr.mv_probs); | 2511 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->mv_probs, vp8_entropy_hdr.mv_probs); |
2513 } | 2512 } |
2514 | 2513 |
2515 bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::SubmitDecode( | 2514 bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::SubmitDecode( |
2516 const scoped_refptr<VP8Picture>& pic, | 2515 const scoped_refptr<VP8Picture>& pic, |
2517 const media::Vp8FrameHeader* frame_hdr, | 2516 const Vp8FrameHeader* frame_hdr, |
2518 const scoped_refptr<VP8Picture>& last_frame, | 2517 const scoped_refptr<VP8Picture>& last_frame, |
2519 const scoped_refptr<VP8Picture>& golden_frame, | 2518 const scoped_refptr<VP8Picture>& golden_frame, |
2520 const scoped_refptr<VP8Picture>& alt_frame) { | 2519 const scoped_refptr<VP8Picture>& alt_frame) { |
2521 struct v4l2_ctrl_vp8_frame_hdr v4l2_frame_hdr; | 2520 struct v4l2_ctrl_vp8_frame_hdr v4l2_frame_hdr; |
2522 memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr)); | 2521 memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr)); |
2523 | 2522 |
2524 #define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a | 2523 #define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a |
2525 FHDR_TO_V4L2_FHDR(key_frame); | 2524 FHDR_TO_V4L2_FHDR(key_frame); |
2526 FHDR_TO_V4L2_FHDR(version); | 2525 FHDR_TO_V4L2_FHDR(version); |
2527 FHDR_TO_V4L2_FHDR(width); | 2526 FHDR_TO_V4L2_FHDR(width); |
(...skipping 163 matching lines...)
2691 DCHECK(inserted); | 2690 DCHECK(inserted); |
2692 | 2691 |
2693 DCHECK(!output_record.at_client); | 2692 DCHECK(!output_record.at_client); |
2694 DCHECK(!output_record.at_device); | 2693 DCHECK(!output_record.at_device); |
2695 DCHECK_NE(output_record.picture_id, -1); | 2694 DCHECK_NE(output_record.picture_id, -1); |
2696 output_record.at_client = true; | 2695 output_record.at_client = true; |
2697 | 2696 |
2698 // TODO(posciak): Use visible size from decoder here instead | 2697 // TODO(posciak): Use visible size from decoder here instead |
2699 // (crbug.com/402760). Passing (0, 0) results in the client using the | 2698 // (crbug.com/402760). Passing (0, 0) results in the client using the |
2700 // visible size extracted from the container instead. | 2699 // visible size extracted from the container instead. |
2701 media::Picture picture(output_record.picture_id, dec_surface->bitstream_id(), | 2700 Picture picture(output_record.picture_id, dec_surface->bitstream_id(), |
2702 gfx::Rect(0, 0), false); | 2701 gfx::Rect(0, 0), false); |
2703 DVLOGF(3) << dec_surface->ToString() | 2702 DVLOGF(3) << dec_surface->ToString() |
2704 << ", bitstream_id: " << picture.bitstream_buffer_id() | 2703 << ", bitstream_id: " << picture.bitstream_buffer_id() |
2705 << ", picture_id: " << picture.picture_buffer_id(); | 2704 << ", picture_id: " << picture.picture_buffer_id(); |
2706 pending_picture_ready_.push(PictureRecord(output_record.cleared, picture)); | 2705 pending_picture_ready_.push(PictureRecord(output_record.cleared, picture)); |
2707 SendPictureReady(); | 2706 SendPictureReady(); |
2708 output_record.cleared = true; | 2707 output_record.cleared = true; |
2709 } | 2708 } |
2710 | 2709 |
2711 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface> | 2710 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface> |
2712 V4L2SliceVideoDecodeAccelerator::CreateSurface() { | 2711 V4L2SliceVideoDecodeAccelerator::CreateSurface() { |
(...skipping 22 matching lines...)
2735 DVLOGF(4) << "Created surface " << input << " -> " << output; | 2734 DVLOGF(4) << "Created surface " << input << " -> " << output; |
2736 return dec_surface; | 2735 return dec_surface; |
2737 } | 2736 } |
2738 | 2737 |
2739 void V4L2SliceVideoDecodeAccelerator::SendPictureReady() { | 2738 void V4L2SliceVideoDecodeAccelerator::SendPictureReady() { |
2740 DVLOGF(3); | 2739 DVLOGF(3); |
2741 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | 2740 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); |
2742 bool resetting_or_flushing = (decoder_resetting_ || decoder_flushing_); | 2741 bool resetting_or_flushing = (decoder_resetting_ || decoder_flushing_); |
2743 while (!pending_picture_ready_.empty()) { | 2742 while (!pending_picture_ready_.empty()) { |
2744 bool cleared = pending_picture_ready_.front().cleared; | 2743 bool cleared = pending_picture_ready_.front().cleared; |
2745 const media::Picture& picture = pending_picture_ready_.front().picture; | 2744 const Picture& picture = pending_picture_ready_.front().picture; |
2746 if (cleared && picture_clearing_count_ == 0) { | 2745 if (cleared && picture_clearing_count_ == 0) { |
2747 DVLOGF(4) << "Posting picture ready to decode task runner for: " | 2746 DVLOGF(4) << "Posting picture ready to decode task runner for: " |
2748 << picture.picture_buffer_id(); | 2747 << picture.picture_buffer_id(); |
2749 // This picture is cleared. It can be posted to a thread different than | 2748 // This picture is cleared. It can be posted to a thread different than |
2750 // the main GPU thread to reduce latency. This should be the case after | 2749 // the main GPU thread to reduce latency. This should be the case after |
2751 // all pictures are cleared at the beginning. | 2750 // all pictures are cleared at the beginning. |
2752 decode_task_runner_->PostTask( | 2751 decode_task_runner_->PostTask( |
2753 FROM_HERE, | 2752 FROM_HERE, |
2754 base::Bind(&Client::PictureReady, decode_client_, picture)); | 2753 base::Bind(&Client::PictureReady, decode_client_, picture)); |
2755 pending_picture_ready_.pop(); | 2754 pending_picture_ready_.pop(); |
(...skipping 35 matching lines...)
2791 | 2790 |
2792 bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread( | 2791 bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread( |
2793 const base::WeakPtr<Client>& decode_client, | 2792 const base::WeakPtr<Client>& decode_client, |
2794 const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) { | 2793 const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) { |
2795 decode_client_ = decode_client; | 2794 decode_client_ = decode_client; |
2796 decode_task_runner_ = decode_task_runner; | 2795 decode_task_runner_ = decode_task_runner; |
2797 return true; | 2796 return true; |
2798 } | 2797 } |
2799 | 2798 |
2800 // static | 2799 // static |
2801 media::VideoDecodeAccelerator::SupportedProfiles | 2800 VideoDecodeAccelerator::SupportedProfiles |
2802 V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() { | 2801 V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() { |
2803 scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder); | 2802 scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder); |
2804 if (!device) | 2803 if (!device) |
2805 return SupportedProfiles(); | 2804 return SupportedProfiles(); |
2806 | 2805 |
2807 return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_), | 2806 return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_), |
2808 supported_input_fourccs_); | 2807 supported_input_fourccs_); |
2809 } | 2808 } |
2810 | 2809 |
2811 } // namespace media | 2810 } // namespace media |