Chromium Code Reviews

Side by Side Diff: content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc

Issue 1822983002: Support external buffer import in VDA interface and add a V4L2SVDA impl. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 9 months ago
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include <errno.h> 5 #include <errno.h>
6 #include <fcntl.h> 6 #include <fcntl.h>
7 #include <linux/videodev2.h> 7 #include <linux/videodev2.h>
8 #include <poll.h> 8 #include <poll.h>
9 #include <string.h> 9 #include <string.h>
10 #include <sys/eventfd.h> 10 #include <sys/eventfd.h>
(...skipping 144 matching lines...)
155 address(nullptr), 155 address(nullptr),
156 length(0), 156 length(0),
157 bytes_used(0), 157 bytes_used(0),
158 at_device(false) { 158 at_device(false) {
159 } 159 }
160 160
161 V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord() 161 V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord()
162 : at_device(false), 162 : at_device(false),
163 at_client(false), 163 at_client(false),
164 picture_id(-1), 164 picture_id(-1),
165 texture_id(0),
165 egl_image(EGL_NO_IMAGE_KHR), 166 egl_image(EGL_NO_IMAGE_KHR),
166 egl_sync(EGL_NO_SYNC_KHR), 167 egl_sync(EGL_NO_SYNC_KHR),
167 cleared(false) { 168 cleared(false) {}
168 }
169 169
170 struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef { 170 struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
171 BitstreamBufferRef( 171 BitstreamBufferRef(
172 base::WeakPtr<VideoDecodeAccelerator::Client>& client, 172 base::WeakPtr<VideoDecodeAccelerator::Client>& client,
173 const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner, 173 const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
174 SharedMemoryRegion* shm, 174 SharedMemoryRegion* shm,
175 int32_t input_id); 175 int32_t input_id);
176 ~BitstreamBufferRef(); 176 ~BitstreamBufferRef();
177 const base::WeakPtr<VideoDecodeAccelerator::Client> client; 177 const base::WeakPtr<VideoDecodeAccelerator::Client> client;
178 const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner; 178 const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
(...skipping 209 matching lines...)
388 device_(device), 388 device_(device),
389 decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"), 389 decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"),
390 device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"), 390 device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"),
391 input_streamon_(false), 391 input_streamon_(false),
392 input_buffer_queued_count_(0), 392 input_buffer_queued_count_(0),
393 output_streamon_(false), 393 output_streamon_(false),
394 output_buffer_queued_count_(0), 394 output_buffer_queued_count_(0),
395 video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN), 395 video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
396 output_format_fourcc_(0), 396 output_format_fourcc_(0),
397 state_(kUninitialized), 397 state_(kUninitialized),
398 output_mode_(Config::OutputMode::ALLOCATE),
398 decoder_flushing_(false), 399 decoder_flushing_(false),
399 decoder_resetting_(false), 400 decoder_resetting_(false),
400 surface_set_change_pending_(false), 401 surface_set_change_pending_(false),
401 picture_clearing_count_(0), 402 picture_clearing_count_(0),
402 egl_display_(egl_display), 403 egl_display_(egl_display),
403 get_gl_context_cb_(get_gl_context_cb), 404 get_gl_context_cb_(get_gl_context_cb),
404 make_context_current_cb_(make_context_current_cb), 405 make_context_current_cb_(make_context_current_cb),
405 weak_this_factory_(this) { 406 weak_this_factory_(this) {
406 weak_this_ = weak_this_factory_.GetWeakPtr(); 407 weak_this_ = weak_this_factory_.GetWeakPtr();
407 } 408 }
(...skipping 106 matching lines...)
514 if (!SetupFormats()) 515 if (!SetupFormats())
515 return false; 516 return false;
516 517
517 if (!decoder_thread_.Start()) { 518 if (!decoder_thread_.Start()) {
518 DLOG(ERROR) << "Initialize(): device thread failed to start"; 519 DLOG(ERROR) << "Initialize(): device thread failed to start";
519 return false; 520 return false;
520 } 521 }
521 decoder_thread_task_runner_ = decoder_thread_.task_runner(); 522 decoder_thread_task_runner_ = decoder_thread_.task_runner();
522 523
523 state_ = kInitialized; 524 state_ = kInitialized;
525 output_mode_ = config.output_mode;
kcwu 2016/03/22 05:42:54 Should we validate the value here?
Owen Lin 2016/03/23 06:32:50 Maybe not, we will set this in ArcGVDA. I think Ar
Pawel Osciak 2016/03/28 01:31:29 It's an enum class, but I guess new values may be
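The exchange above leaves the enum check open. A minimal sketch of what validating the incoming mode at this point could look like, assuming ALLOCATE and IMPORT (the two modes used elsewhere in this patch) are the only values the decoder supports:

    // Hypothetical check, not part of this patch set: reject output modes
    // this decoder does not implement before caching the value.
    if (config.output_mode != Config::OutputMode::ALLOCATE &&
        config.output_mode != Config::OutputMode::IMPORT) {
      DLOG(ERROR) << "Unsupported output mode";
      return false;
    }
    output_mode_ = config.output_mode;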
524 526
525 // InitializeTask will NOTIFY_ERROR on failure. 527 // InitializeTask will NOTIFY_ERROR on failure.
526 decoder_thread_task_runner_->PostTask( 528 decoder_thread_task_runner_->PostTask(
527 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::InitializeTask, 529 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::InitializeTask,
528 base::Unretained(this))); 530 base::Unretained(this)));
529 531
530 DVLOGF(1) << "V4L2SliceVideoDecodeAccelerator initialized"; 532 DVLOGF(1) << "V4L2SliceVideoDecodeAccelerator initialized";
531 return true; 533 return true;
532 } 534 }
533 535
(...skipping 353 matching lines...)
887 DVLOGF(3); 889 DVLOGF(3);
888 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); 890 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
889 891
890 struct v4l2_buffer dqbuf; 892 struct v4l2_buffer dqbuf;
891 struct v4l2_plane planes[VIDEO_MAX_PLANES]; 893 struct v4l2_plane planes[VIDEO_MAX_PLANES];
892 while (input_buffer_queued_count_ > 0) { 894 while (input_buffer_queued_count_ > 0) {
893 DCHECK(input_streamon_); 895 DCHECK(input_streamon_);
894 memset(&dqbuf, 0, sizeof(dqbuf)); 896 memset(&dqbuf, 0, sizeof(dqbuf));
895 memset(&planes, 0, sizeof(planes)); 897 memset(&planes, 0, sizeof(planes));
896 dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; 898 dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
897 dqbuf.memory = V4L2_MEMORY_USERPTR; 899 dqbuf.memory = V4L2_MEMORY_MMAP;
898 dqbuf.m.planes = planes; 900 dqbuf.m.planes = planes;
899 dqbuf.length = input_planes_count_; 901 dqbuf.length = input_planes_count_;
900 if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) { 902 if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
901 if (errno == EAGAIN) { 903 if (errno == EAGAIN) {
902 // EAGAIN if we're just out of buffers to dequeue. 904 // EAGAIN if we're just out of buffers to dequeue.
903 break; 905 break;
904 } 906 }
905 PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF"; 907 PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
906 NOTIFY_ERROR(PLATFORM_FAILURE); 908 NOTIFY_ERROR(PLATFORM_FAILURE);
907 return; 909 return;
908 } 910 }
909 InputRecord& input_record = input_buffer_map_[dqbuf.index]; 911 InputRecord& input_record = input_buffer_map_[dqbuf.index];
910 DCHECK(input_record.at_device); 912 DCHECK(input_record.at_device);
911 input_record.at_device = false; 913 input_record.at_device = false;
912 ReuseInputBuffer(dqbuf.index); 914 ReuseInputBuffer(dqbuf.index);
913 input_buffer_queued_count_--; 915 input_buffer_queued_count_--;
914 DVLOGF(4) << "Dequeued input=" << dqbuf.index 916 DVLOGF(4) << "Dequeued input=" << dqbuf.index
915 << " count: " << input_buffer_queued_count_; 917 << " count: " << input_buffer_queued_count_;
916 } 918 }
917 919
918 while (output_buffer_queued_count_ > 0) { 920 while (output_buffer_queued_count_ > 0) {
919 DCHECK(output_streamon_); 921 DCHECK(output_streamon_);
920 memset(&dqbuf, 0, sizeof(dqbuf)); 922 memset(&dqbuf, 0, sizeof(dqbuf));
921 memset(&planes, 0, sizeof(planes)); 923 memset(&planes, 0, sizeof(planes));
922 dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; 924 dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
923 dqbuf.memory = V4L2_MEMORY_MMAP; 925 if (output_mode_ == Config::OutputMode::ALLOCATE)
926 dqbuf.memory = V4L2_MEMORY_MMAP;
Owen Lin 2016/03/23 06:32:50 use the ternary operator: "? :"
Pawel Osciak 2016/03/28 01:31:28 Done.
927 else
928 dqbuf.memory = V4L2_MEMORY_DMABUF;
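The ternary form the reviewer asks for (and the author marks "Done") would read roughly as follows, using the same members as the patch:

    // Sketch of the reviewer-requested ternary form for selecting the
    // dequeue memory type on the CAPTURE queue.
    dqbuf.memory = (output_mode_ == Config::OutputMode::ALLOCATE)
                       ? V4L2_MEMORY_MMAP
                       : V4L2_MEMORY_DMABUF;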
924 dqbuf.m.planes = planes; 929 dqbuf.m.planes = planes;
925 dqbuf.length = output_planes_count_; 930 dqbuf.length = output_planes_count_;
926 if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) { 931 if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
927 if (errno == EAGAIN) { 932 if (errno == EAGAIN) {
928 // EAGAIN if we're just out of buffers to dequeue. 933 // EAGAIN if we're just out of buffers to dequeue.
929 break; 934 break;
930 } 935 }
931 PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF"; 936 PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
932 NOTIFY_ERROR(PLATFORM_FAILURE); 937 NOTIFY_ERROR(PLATFORM_FAILURE);
933 return; 938 return;
(...skipping 115 matching lines...)
1049 } 1054 }
1050 1055
1051 bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) { 1056 bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
1052 DVLOGF(3); 1057 DVLOGF(3);
1053 DCHECK_LT(index, static_cast<int>(output_buffer_map_.size())); 1058 DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));
1054 1059
1055 // Enqueue an output (VIDEO_CAPTURE) buffer. 1060 // Enqueue an output (VIDEO_CAPTURE) buffer.
1056 OutputRecord& output_record = output_buffer_map_[index]; 1061 OutputRecord& output_record = output_buffer_map_[index];
1057 DCHECK(!output_record.at_device); 1062 DCHECK(!output_record.at_device);
1058 DCHECK(!output_record.at_client); 1063 DCHECK(!output_record.at_client);
1059 DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
1060 DCHECK_NE(output_record.picture_id, -1); 1064 DCHECK_NE(output_record.picture_id, -1);
1061 1065
1062 if (output_record.egl_sync != EGL_NO_SYNC_KHR) { 1066 if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
1063 // If we have to wait for completion, wait. Note that 1067 // If we have to wait for completion, wait. Note that
1064 // free_output_buffers_ is a FIFO queue, so we always wait on the 1068 // free_output_buffers_ is a FIFO queue, so we always wait on the
1065 // buffer that has been in the queue the longest. 1069 // buffer that has been in the queue the longest.
1066 if (eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0, 1070 if (eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0,
1067 EGL_FOREVER_KHR) == EGL_FALSE) { 1071 EGL_FOREVER_KHR) == EGL_FALSE) {
1068 // This will cause tearing, but is safe otherwise. 1072 // This will cause tearing, but is safe otherwise.
1069 DVLOGF(1) << "eglClientWaitSyncKHR failed!"; 1073 DVLOGF(1) << "eglClientWaitSyncKHR failed!";
1070 } 1074 }
1071 if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) { 1075 if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) {
1072 LOGF(ERROR) << "eglDestroySyncKHR failed!"; 1076 LOGF(ERROR) << "eglDestroySyncKHR failed!";
1073 NOTIFY_ERROR(PLATFORM_FAILURE); 1077 NOTIFY_ERROR(PLATFORM_FAILURE);
1074 return false; 1078 return false;
1075 } 1079 }
1076 output_record.egl_sync = EGL_NO_SYNC_KHR; 1080 output_record.egl_sync = EGL_NO_SYNC_KHR;
1077 } 1081 }
1078 1082
1079 struct v4l2_buffer qbuf; 1083 struct v4l2_buffer qbuf;
1080 struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES]; 1084 struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
1081 memset(&qbuf, 0, sizeof(qbuf)); 1085 memset(&qbuf, 0, sizeof(qbuf));
1082 memset(qbuf_planes, 0, sizeof(qbuf_planes)); 1086 memset(qbuf_planes, 0, sizeof(qbuf_planes));
1083 qbuf.index = index; 1087 qbuf.index = index;
1084 qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; 1088 qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1085 qbuf.memory = V4L2_MEMORY_MMAP; 1089 if (output_mode_ == Config::OutputMode::ALLOCATE) {
1090 qbuf.memory = V4L2_MEMORY_MMAP;
1091 } else {
1092 qbuf.memory = V4L2_MEMORY_DMABUF;
1093 DCHECK_EQ(output_planes_count_, output_record.dmabuf_fds.size());
1094 for (size_t i = 0; i < output_record.dmabuf_fds.size(); ++i) {
1095 DCHECK_NE(output_record.dmabuf_fds[i].get(), -1);
Owen Lin 2016/03/23 06:32:50 DCHECK(output_record.dmabuf_fds[i].is_valid());
Pawel Osciak 2016/03/28 01:31:29 Done.
1096 qbuf_planes[i].m.fd = output_record.dmabuf_fds[i].get();
1097 }
1098 }
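With the is_valid() suggestion applied (marked "Done"), the import branch that hands the dmabuf file descriptors to VIDIOC_QBUF would look roughly like this sketch:

    qbuf.memory = V4L2_MEMORY_DMABUF;
    DCHECK_EQ(output_planes_count_, output_record.dmabuf_fds.size());
    for (size_t i = 0; i < output_record.dmabuf_fds.size(); ++i) {
      // Every imported plane must carry a valid dmabuf fd.
      DCHECK(output_record.dmabuf_fds[i].is_valid());
      qbuf_planes[i].m.fd = output_record.dmabuf_fds[i].get();
    }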
1086 qbuf.m.planes = qbuf_planes; 1099 qbuf.m.planes = qbuf_planes;
1087 qbuf.length = output_planes_count_; 1100 qbuf.length = output_planes_count_;
1088 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf); 1101 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
1089 output_record.at_device = true; 1102 output_record.at_device = true;
1090 output_buffer_queued_count_++; 1103 output_buffer_queued_count_++;
1091 DVLOGF(4) << "Enqueued output=" << qbuf.index 1104 DVLOGF(4) << "Enqueued output=" << qbuf.index
1092 << " count: " << output_buffer_queued_count_; 1105 << " count: " << output_buffer_queued_count_;
1093 1106
1094 return true; 1107 return true;
1095 } 1108 }
(...skipping 262 matching lines...)
1358 // yet. We will not start decoding without having surfaces available, 1371 // yet. We will not start decoding without having surfaces available,
1359 // and will schedule a decode task once the client provides the buffers. 1372 // and will schedule a decode task once the client provides the buffers.
1360 surface_set_change_pending_ = false; 1373 surface_set_change_pending_ = false;
1361 DVLOG(3) << "Surface set change finished"; 1374 DVLOG(3) << "Surface set change finished";
1362 return true; 1375 return true;
1363 } 1376 }
1364 1377
1365 bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) { 1378 bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) {
1366 DVLOGF(3); 1379 DVLOGF(3);
1367 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); 1380 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1368 std::vector<EGLImageKHR> egl_images_to_destroy; 1381 std::vector<EGLImageKHR> egl_images_to_destroy;
kcwu 2016/03/22 05:42:54 unused
Pawel Osciak 2016/03/28 01:31:28 Done.
1369 std::vector<int32_t> picture_buffers_to_dismiss; 1382 std::vector<int32_t> picture_buffers_to_dismiss;
1370 1383
1371 if (output_buffer_map_.empty()) 1384 if (output_buffer_map_.empty())
1372 return true; 1385 return true;
1373 1386
1374 for (auto output_record : output_buffer_map_) { 1387 for (const auto& output_record : output_buffer_map_) {
1375 DCHECK(!output_record.at_device); 1388 DCHECK(!output_record.at_device);
1376 1389
1377 if (output_record.egl_sync != EGL_NO_SYNC_KHR) { 1390 if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
1378 if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) 1391 if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE)
1379 DVLOGF(1) << "eglDestroySyncKHR failed."; 1392 DVLOGF(1) << "eglDestroySyncKHR failed.";
1380 } 1393 }
1381 1394
1382 if (output_record.egl_image != EGL_NO_IMAGE_KHR) { 1395 if (output_record.egl_image != EGL_NO_IMAGE_KHR) {
1383 child_task_runner_->PostTask( 1396 child_task_runner_->PostTask(
1384 FROM_HERE, 1397 FROM_HERE,
(...skipping 84 matching lines...)
1469 << ", requested " << req_buffer_count << ")"; 1482 << ", requested " << req_buffer_count << ")";
1470 NOTIFY_ERROR(INVALID_ARGUMENT); 1483 NOTIFY_ERROR(INVALID_ARGUMENT);
1471 return; 1484 return;
1472 } 1485 }
1473 1486
1474 // Allocate the output buffers. 1487 // Allocate the output buffers.
1475 struct v4l2_requestbuffers reqbufs; 1488 struct v4l2_requestbuffers reqbufs;
1476 memset(&reqbufs, 0, sizeof(reqbufs)); 1489 memset(&reqbufs, 0, sizeof(reqbufs));
1477 reqbufs.count = buffers.size(); 1490 reqbufs.count = buffers.size();
1478 reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; 1491 reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1479 reqbufs.memory = V4L2_MEMORY_MMAP; 1492 if (output_mode_ == Config::OutputMode::ALLOCATE)
1493 reqbufs.memory = V4L2_MEMORY_MMAP;
Owen Lin 2016/03/23 06:32:50 ditto.
Pawel Osciak 2016/03/28 01:31:28 Done.
1494 else
1495 reqbufs.memory = V4L2_MEMORY_DMABUF;
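The ALLOCATE/IMPORT memory-type branch recurs here and in the dequeue and enqueue paths above. One purely illustrative way to avoid repeating it (not something this patch does) is a small accessor that maps the configured output mode to the V4L2 memory type:

    // Hypothetical helper, not in the patch: the V4L2 memory type that
    // matches the configured output mode.
    v4l2_memory OutputMemoryType() const {
      return output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP
                                                          : V4L2_MEMORY_DMABUF;
    }

    // Usage at this call site would then be:
    reqbufs.memory = OutputMemoryType();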
1480 IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs); 1496 IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
1481 1497
1482 if (reqbufs.count != buffers.size()) { 1498 if (reqbufs.count != buffers.size()) {
1483 DLOG(ERROR) << "Could not allocate enough output buffers"; 1499 DLOG(ERROR) << "Could not allocate enough output buffers";
1484 NOTIFY_ERROR(PLATFORM_FAILURE); 1500 NOTIFY_ERROR(PLATFORM_FAILURE);
1485 return; 1501 return;
1486 } 1502 }
1487 1503
1488 child_task_runner_->PostTask( 1504 DCHECK(free_output_buffers_.empty());
1489 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::CreateEGLImages, 1505 DCHECK(output_buffer_map_.empty());
1490 weak_this_, buffers, output_format_fourcc_, 1506 output_buffer_map_.resize(buffers.size());
1491 output_planes_count_)); 1507 for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
1508 DCHECK(buffers[i].size() == coded_size_);
1509
1510 OutputRecord& output_record = output_buffer_map_[i];
1511 DCHECK(!output_record.at_device);
1512 DCHECK(!output_record.at_client);
1513 DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
1514 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
1515 DCHECK_EQ(output_record.picture_id, -1);
1516 DCHECK(output_record.dmabuf_fds.empty());
1517 DCHECK_EQ(output_record.cleared, false);
1518
1519 output_record.picture_id = buffers[i].id();
1520 output_record.texture_id = buffers[i].texture_id();
1521 // This will remain true until ImportBufferForPicture is called, either by
1522 // the client, or by ourselves, if we are allocating.
1523 output_record.at_client = true;
Owen Lin 2016/03/23 06:32:50 As suggested by kcwu, maybe we can make it a singl
Pawel Osciak 2016/03/28 01:31:29 As discussed offline, to be addressed separately.
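The (truncated) suggestion above appears to be about folding the at_device/at_client flags into one ownership state; the author defers it to a separate change. A purely illustrative sketch of that idea, with hypothetical names:

    // Hypothetical refactor sketch, deferred in review: record which party
    // currently owns an output buffer instead of juggling two booleans.
    enum class OutputBufferOwner {
      kFree,    // on free_output_buffers_, ready to be queued
      kDevice,  // queued on the V4L2 CAPTURE queue
      kClient,  // handed to the client, awaiting reuse/import
    };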
1524 if (output_mode_ == Config::OutputMode::ALLOCATE) {
1525 std::vector<base::ScopedFD> dmabuf_fds =
1526 std::move(device_->GetDmabufsForV4L2Buffer(
1527 i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE));
1528 if (dmabuf_fds.empty()) {
1529 NOTIFY_ERROR(PLATFORM_FAILURE);
1530 return;
1531 }
1532
1533 auto passed_dmabuf_fds(make_scoped_ptr(
1534 new std::vector<base::ScopedFD>(std::move(dmabuf_fds))));
1535 ImportBufferForPictureTask(output_record.picture_id,
1536 std::move(passed_dmabuf_fds));
1537 } // else we'll get triggered via ImportBufferForPicture() from client.
1538 DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
1539 }
1540
1541 if (!StartDevicePoll()) {
1542 NOTIFY_ERROR(PLATFORM_FAILURE);
1543 return;
1544 }
1492 } 1545 }
1493 1546
1494 void V4L2SliceVideoDecodeAccelerator::CreateEGLImages( 1547 void V4L2SliceVideoDecodeAccelerator::CreateEGLImageFor(
1495 const std::vector<media::PictureBuffer>& buffers, 1548 size_t buffer_index,
1496 uint32_t output_format_fourcc, 1549 scoped_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds,
1497 size_t output_planes_count) { 1550 GLuint texture_id,
1498 DVLOGF(3); 1551 const gfx::Size& size,
1552 uint32_t fourcc) {
1553 DVLOGF(3) << "index=" << buffer_index;
1499 DCHECK(child_task_runner_->BelongsToCurrentThread()); 1554 DCHECK(child_task_runner_->BelongsToCurrentThread());
1500 1555
1501 gfx::GLContext* gl_context = get_gl_context_cb_.Run(); 1556 gfx::GLContext* gl_context = get_gl_context_cb_.Run();
1502 if (!gl_context || !make_context_current_cb_.Run()) { 1557 if (!gl_context || !make_context_current_cb_.Run()) {
1503 DLOG(ERROR) << "No GL context"; 1558 DLOG(ERROR) << "No GL context";
1504 NOTIFY_ERROR(PLATFORM_FAILURE); 1559 NOTIFY_ERROR(PLATFORM_FAILURE);
1505 return; 1560 return;
1506 } 1561 }
1507 1562
1508 gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0); 1563 gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);
1509 1564
1510 std::vector<EGLImageKHR> egl_images; 1565 EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_,
1511 for (size_t i = 0; i < buffers.size(); ++i) { 1566 gl_context->GetHandle(),
1512 EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_, 1567 texture_id,
1513 gl_context->GetHandle(), 1568 size,
1514 buffers[i].texture_id(), 1569 buffer_index,
1515 buffers[i].size(), 1570 fourcc,
1516 i, 1571 *passed_dmabuf_fds);
1517 output_format_fourcc, 1572 if (egl_image == EGL_NO_IMAGE_KHR) {
1518 output_planes_count); 1573 LOGF(ERROR) << "Could not create EGLImageKHR,"
1519 if (egl_image == EGL_NO_IMAGE_KHR) { 1574 << " index=" << buffer_index << " texture_id=" << texture_id;
kcwu 2016/03/22 05:42:54 why not NOTIFY_ERROR and return here?
Pawel Osciak 2016/03/28 01:31:29 Done.
1520 LOGF(ERROR) << "Could not create EGLImageKHR";
1521 for (const auto& image_to_destroy : egl_images)
1522 device_->DestroyEGLImage(egl_display_, image_to_destroy);
1523
1524 NOTIFY_ERROR(PLATFORM_FAILURE);
1525 return;
1526 }
1527
1528 egl_images.push_back(egl_image);
1529 } 1575 }
1530 1576
1531 decoder_thread_task_runner_->PostTask( 1577 decoder_thread_task_runner_->PostTask(
1532 FROM_HERE, base::Bind( 1578 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::AssignEGLImage,
1533 &V4L2SliceVideoDecodeAccelerator::AssignEGLImages, 1579 base::Unretained(this), buffer_index, egl_image,
1534 base::Unretained(this), buffers, egl_images)); 1580 base::Passed(&passed_dmabuf_fds)));
1535 } 1581 }
1536 1582
1537 void V4L2SliceVideoDecodeAccelerator::AssignEGLImages( 1583 void V4L2SliceVideoDecodeAccelerator::AssignEGLImage(
1538 const std::vector<media::PictureBuffer>& buffers, 1584 size_t buffer_index,
1539 const std::vector<EGLImageKHR>& egl_images) { 1585 EGLImageKHR egl_image,
1540 DVLOGF(3); 1586 scoped_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) {
1587 DVLOGF(3) << "index=" << buffer_index;
1541 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); 1588 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1542 DCHECK_EQ(buffers.size(), egl_images.size());
1543 1589
1544 DCHECK(free_output_buffers_.empty()); 1590 DCHECK_LT(buffer_index, output_buffer_map_.size());
1545 DCHECK(output_buffer_map_.empty()); 1591 OutputRecord& output_record = output_buffer_map_[buffer_index];
1592 DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
1593 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
1594 DCHECK(!output_record.at_client);
1595 DCHECK(!output_record.at_device);
1546 1596
1547 output_buffer_map_.resize(buffers.size()); 1597 output_record.egl_image = egl_image;
1548 1598 if (output_mode_ == Config::OutputMode::IMPORT) {
1549 for (size_t i = 0; i < output_buffer_map_.size(); ++i) { 1599 DCHECK(output_record.dmabuf_fds.empty());
1550 DCHECK(buffers[i].size() == coded_size_); 1600 output_record.dmabuf_fds.swap(*passed_dmabuf_fds);
Owen Lin 2016/03/23 06:32:50 std::move(*passed_dmabuf_fds);
Pawel Osciak 2016/03/28 01:31:28 Done.
1551
1552 OutputRecord& output_record = output_buffer_map_[i];
1553 DCHECK(!output_record.at_device);
1554 DCHECK(!output_record.at_client);
1555 DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
1556 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
1557 DCHECK_EQ(output_record.picture_id, -1);
1558 DCHECK_EQ(output_record.cleared, false);
1559
1560 output_record.egl_image = egl_images[i];
1561 output_record.picture_id = buffers[i].id();
1562 free_output_buffers_.push_back(i);
1563 DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
1564 } 1601 }
1565 1602
1566 if (!StartDevicePoll()) { 1603 DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
Owen Lin 2016/03/23 06:32:50 Will it be nice to have a static function: bool c
Pawel Osciak 2016/03/28 01:31:29 Hmm... Sound like a good idea, but I'm on the fenc
1567 NOTIFY_ERROR(PLATFORM_FAILURE); 1604 buffer_index),
1605 0);
1606 free_output_buffers_.push_back(buffer_index);
1607 ScheduleDecodeBufferTaskIfNeeded();
1608 }
1609
1610 void V4L2SliceVideoDecodeAccelerator::ImportBufferForPicture(
1611 int32_t picture_buffer_id,
1612 const std::vector<gfx::GpuMemoryBufferHandle>& gpu_memory_buffer_handles) {
1613 DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
1614 DCHECK(child_task_runner_->BelongsToCurrentThread());
1615
1616 auto passed_dmabuf_fds(make_scoped_ptr(new std::vector<base::ScopedFD>));
kcwu 2016/03/22 05:42:54 Add "()" for new. new std::vector<base::ScopedFD>(
Pawel Osciak 2016/03/28 01:31:29 Done.
1617 for (const auto& handle : gpu_memory_buffer_handles) {
1618 int fd = handle.native_pixmap_handle.fd.fd;
kcwu 2016/03/22 05:42:54 I found native_pixelmap_handle is only available i
Pawel Osciak 2016/03/28 01:31:29 Done.
1619 DCHECK_NE(fd, -1);
1620 passed_dmabuf_fds->push_back(base::ScopedFD(fd));
1621 }
1622
1623 if (output_mode_ != Config::OutputMode::IMPORT) {
1624 LOGF(ERROR) << "Cannot import in non-import mode";
1625 NOTIFY_ERROR(INVALID_ARGUMENT);
1568 return; 1626 return;
1569 } 1627 }
1570 1628
1571 ProcessPendingEventsIfNeeded(); 1629 decoder_thread_task_runner_->PostTask(
1630 FROM_HERE,
1631 base::Bind(&V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask,
1632 base::Unretained(this), picture_buffer_id,
1633 base::Passed(&passed_dmabuf_fds)));
1634 }
1635
1636 void V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask(
1637 int32_t picture_buffer_id,
1638 scoped_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) {
1639 DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
1640 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1641
1642 const auto iter =
1643 std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(),
1644 [picture_buffer_id](const OutputRecord& output_record) {
1645 return output_record.picture_id == picture_buffer_id;
1646 });
1647 if (iter == output_buffer_map_.end()) {
1648 LOGF(ERROR) << "Invalid picture_buffer_id=" << picture_buffer_id;
1649 NOTIFY_ERROR(INVALID_ARGUMENT);
1650 return;
1651 }
1652
1653 if (!iter->at_client) {
1654 LOGF(ERROR) << "Cannot import buffer that is not owned by the client";
1655 NOTIFY_ERROR(INVALID_ARGUMENT);
1656 return;
1657 }
1658
1659 size_t index = iter - output_buffer_map_.begin();
1660 DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
1661 index),
1662 0);
1663
1664 DCHECK(!iter->at_device);
1665 iter->at_client = false;
1666 if (iter->texture_id != 0) {
1667 if (iter->egl_image != EGL_NO_IMAGE_KHR) {
1668 child_task_runner_->PostTask(
1669 FROM_HERE,
1670 base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_,
1671 egl_display_, iter->egl_image));
1672 }
1673
1674 child_task_runner_->PostTask(
1675 FROM_HERE,
1676 base::Bind(&V4L2SliceVideoDecodeAccelerator::CreateEGLImageFor,
1677 weak_this_, index, base::Passed(&passed_dmabuf_fds),
1678 iter->texture_id, coded_size_, output_format_fourcc_));
1679 } else {
1680 // No need for an EGLImage, start using this buffer now.
1681 DCHECK_EQ(output_planes_count_, passed_dmabuf_fds->size());
1682 iter->dmabuf_fds.swap(*passed_dmabuf_fds);
1683 free_output_buffers_.push_back(index);
1684 ScheduleDecodeBufferTaskIfNeeded();
1685 }
1572 } 1686 }
1573 1687
1574 void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer( 1688 void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
1575 int32_t picture_buffer_id) { 1689 int32_t picture_buffer_id) {
1576 DCHECK(child_task_runner_->BelongsToCurrentThread()); 1690 DCHECK(child_task_runner_->BelongsToCurrentThread());
1577 DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id; 1691 DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
1578 1692
1579 if (!make_context_current_cb_.Run()) { 1693 if (!make_context_current_cb_.Run()) {
1580 LOGF(ERROR) << "could not make context current"; 1694 LOGF(ERROR) << "could not make context current";
1581 NOTIFY_ERROR(PLATFORM_FAILURE); 1695 NOTIFY_ERROR(PLATFORM_FAILURE);
(...skipping 921 matching lines...)
2503 OutputRecord& output_record = 2617 OutputRecord& output_record =
2504 output_buffer_map_[dec_surface->output_record()]; 2618 output_buffer_map_[dec_surface->output_record()];
2505 2619
2506 bool inserted = 2620 bool inserted =
2507 surfaces_at_display_.insert(std::make_pair(output_record.picture_id, 2621 surfaces_at_display_.insert(std::make_pair(output_record.picture_id,
2508 dec_surface)).second; 2622 dec_surface)).second;
2509 DCHECK(inserted); 2623 DCHECK(inserted);
2510 2624
2511 DCHECK(!output_record.at_client); 2625 DCHECK(!output_record.at_client);
2512 DCHECK(!output_record.at_device); 2626 DCHECK(!output_record.at_device);
2513 DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
2514 DCHECK_NE(output_record.picture_id, -1); 2627 DCHECK_NE(output_record.picture_id, -1);
2515 output_record.at_client = true; 2628 output_record.at_client = true;
2516 2629
2517 // TODO(posciak): Use visible size from decoder here instead 2630 // TODO(posciak): Use visible size from decoder here instead
2518 // (crbug.com/402760). Passing (0, 0) results in the client using the 2631 // (crbug.com/402760). Passing (0, 0) results in the client using the
2519 // visible size extracted from the container instead. 2632 // visible size extracted from the container instead.
2520 media::Picture picture(output_record.picture_id, dec_surface->bitstream_id(), 2633 media::Picture picture(output_record.picture_id, dec_surface->bitstream_id(),
2521 gfx::Rect(0, 0), false); 2634 gfx::Rect(0, 0), false);
2522 DVLOGF(3) << dec_surface->ToString() 2635 DVLOGF(3) << dec_surface->ToString()
2523 << ", bitstream_id: " << picture.bitstream_buffer_id() 2636 << ", bitstream_id: " << picture.bitstream_buffer_id()
(...skipping 85 matching lines...)
2609 } 2722 }
2610 2723
2611 bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread( 2724 bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
2612 const base::WeakPtr<Client>& decode_client, 2725 const base::WeakPtr<Client>& decode_client,
2613 const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) { 2726 const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
2614 decode_client_ = decode_client; 2727 decode_client_ = decode_client;
2615 decode_task_runner_ = decode_task_runner; 2728 decode_task_runner_ = decode_task_runner;
2616 return true; 2729 return true;
2617 } 2730 }
2618 2731
2732 media::VideoPixelFormat V4L2SliceVideoDecodeAccelerator::GetOutputFormat()
2733 const {
2734 return V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_);
2735 }
2736
2619 // static 2737 // static
2620 media::VideoDecodeAccelerator::SupportedProfiles 2738 media::VideoDecodeAccelerator::SupportedProfiles
2621 V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() { 2739 V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() {
2622 scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder); 2740 scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
2623 if (!device) 2741 if (!device)
2624 return SupportedProfiles(); 2742 return SupportedProfiles();
2625 2743
2626 return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_), 2744 return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_),
2627 supported_input_fourccs_); 2745 supported_input_fourccs_);
2628 } 2746 }
2629 2747
2630 } // namespace content 2748 } // namespace content