Chromium Code Reviews

Unified Diff: content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc

Issue 1822983002: Support external buffer import in VDA interface and add a V4L2SVDA impl. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: include reland of crrev.com/1643123003 with fixes and rebase Created 4 years, 8 months ago
Index: content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
diff --git a/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc b/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
index 50e6c59503465848a8cc33e62d6957c6f4a8d983..cf0ad1c41f6c051818f186e70c2f67d3966089e5 100644
--- a/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
+++ b/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
@@ -17,6 +17,7 @@
#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/stringprintf.h"
#include "content/common/gpu/media/shared_memory_region.h"
@@ -162,10 +163,10 @@ V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord()
: at_device(false),
at_client(false),
picture_id(-1),
+ texture_id(0),
egl_image(EGL_NO_IMAGE_KHR),
egl_sync(EGL_NO_SYNC_KHR),
- cleared(false) {
-}
+ cleared(false) {}
struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
BitstreamBufferRef(
@@ -395,11 +396,11 @@ V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
output_format_fourcc_(0),
state_(kUninitialized),
+ output_mode_(Config::OutputMode::ALLOCATE),
decoder_flushing_(false),
decoder_resetting_(false),
surface_set_change_pending_(false),
picture_clearing_count_(0),
- pictures_assigned_(false, false),
egl_display_(egl_display),
get_gl_context_cb_(get_gl_context_cb),
make_context_current_cb_(make_context_current_cb),
@@ -438,8 +439,10 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
DCHECK(child_task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kUninitialized);
- if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
- NOTREACHED() << "GL callbacks are required for this VDA";
+ if (!device_->SupportsDecodeProfileForV4L2PixelFormats(
+ config.profile, arraysize(supported_input_fourccs_),
+ supported_input_fourccs_)) {
+ DVLOGF(1) << "unsupported profile " << config.profile;
return false;
}
@@ -448,10 +451,9 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
return false;
}
- if (!device_->SupportsDecodeProfileForV4L2PixelFormats(
- config.profile, arraysize(supported_input_fourccs_),
- supported_input_fourccs_)) {
- DVLOGF(1) << "unsupported profile " << config.profile;
+ if (config.output_mode != Config::OutputMode::ALLOCATE &&
+ config.output_mode != Config::OutputMode::IMPORT) {
+ NOTREACHED() << "Only ALLOCATE and IMPORT OutputModes are supported";
return false;
}
@@ -492,14 +494,18 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
}
// We need the context to be initialized to query extensions.
- if (!make_context_current_cb_.Run()) {
- LOG(ERROR) << "Initialize(): could not make context current";
- return false;
- }
+ if (!make_context_current_cb_.is_null()) {
+ if (!make_context_current_cb_.Run()) {
+ LOG(ERROR) << "Initialize(): could not make context current";
+ return false;
+ }
- if (!gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
- LOG(ERROR) << "Initialize(): context does not have EGL_KHR_fence_sync";
- return false;
+ if (!gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
+ LOG(ERROR) << "Initialize(): context does not have EGL_KHR_fence_sync";
+ return false;
+ }
+ } else {
+ DVLOG(1) << "No GL callbacks provided, initializing without GL support";
}
// Capabilities check.
@@ -522,6 +528,7 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
decoder_thread_task_runner_ = decoder_thread_.task_runner();
state_ = kInitialized;
+ output_mode_ = config.output_mode;
// InitializeTask will NOTIFY_ERROR on failure.
decoder_thread_task_runner_->PostTask(
@@ -554,11 +561,6 @@ void V4L2SliceVideoDecodeAccelerator::Destroy() {
FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DestroyTask,
base::Unretained(this)));
- // Wake up decoder thread in case we are waiting in CreateOutputBuffers
- // for client to provide pictures. Since this is Destroy, we won't be
- // getting them anymore (AssignPictureBuffers won't be called).
- pictures_assigned_.Signal();
-
// Wait for tasks to finish/early-exit.
decoder_thread_.Stop();
}
@@ -760,12 +762,7 @@ bool V4L2SliceVideoDecodeAccelerator::CreateOutputBuffers() {
client_, num_pictures, 1, coded_size_,
device_->GetTextureTarget()));
wuchengli 2016/04/13 10:54:25 Please document what the state is before setting i
Pawel Osciak 2016/04/18 06:17:32 Done.
- // Wait for the client to call AssignPictureBuffers() on the Child thread.
- // We do this, because if we continue decoding without finishing buffer
- // allocation, we may end up Resetting before AssignPictureBuffers arrives,
- // resulting in unnecessary complications and subtle bugs.
- pictures_assigned_.Wait();
-
+ state_ = kAwaitingPictureBuffers;
return true;
}
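Note on the hunk above: the blocking wait on the pictures_assigned_ WaitableEvent is replaced by a new kAwaitingPictureBuffers state, so CreateOutputBuffers() returns immediately and AssignPictureBuffersTask() later resumes event processing on the decoder thread. A minimal sketch of that non-blocking hand-off, using illustrative names rather than the actual class members:

// Minimal sketch of the non-blocking hand-off introduced above; the class and
// member names are illustrative stand-ins, not the VDA's.
enum State { kDecoding, kIdle, kAwaitingPictureBuffers };

struct DecoderSketch {
  State state = kDecoding;

  bool CreateOutputBuffers() {
    // Instead of blocking on a WaitableEvent until AssignPictureBuffers()
    // arrives, record that we are waiting and return to the message loop.
    state = kAwaitingPictureBuffers;
    return true;
  }

  void AssignPictureBuffersTask() {
    // Runs later on the decoder thread once the client has provided buffers;
    // re-enters event processing, which eventually flips back to kDecoding.
    state = kIdle;
    ProcessPendingEventsIfNeeded();
  }

  void ProcessPendingEventsIfNeeded() {}
};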
@@ -792,7 +789,7 @@ void V4L2SliceVideoDecodeAccelerator::DestroyInputBuffers() {
}
void V4L2SliceVideoDecodeAccelerator::DismissPictures(
- std::vector<int32_t> picture_buffer_ids,
+ const std::vector<int32_t>& picture_buffer_ids,
base::WaitableEvent* done) {
DVLOGF(3);
DCHECK(child_task_runner_->BelongsToCurrentThread());
@@ -906,7 +903,7 @@ void V4L2SliceVideoDecodeAccelerator::Dequeue() {
memset(&dqbuf, 0, sizeof(dqbuf));
memset(&planes, 0, sizeof(planes));
dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- dqbuf.memory = V4L2_MEMORY_USERPTR;
+ dqbuf.memory = V4L2_MEMORY_MMAP;
dqbuf.m.planes = planes;
dqbuf.length = input_planes_count_;
if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
@@ -932,7 +929,9 @@ void V4L2SliceVideoDecodeAccelerator::Dequeue() {
memset(&dqbuf, 0, sizeof(dqbuf));
memset(&planes, 0, sizeof(planes));
dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dqbuf.memory = V4L2_MEMORY_MMAP;
+ dqbuf.memory =
+ (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP
+ : V4L2_MEMORY_DMABUF);
dqbuf.m.planes = planes;
dqbuf.length = output_planes_count_;
if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
@@ -968,16 +967,47 @@ void V4L2SliceVideoDecodeAccelerator::Dequeue() {
ProcessPendingEventsIfNeeded();
}
+void V4L2SliceVideoDecodeAccelerator::NewEventPending() {
+ // Switch to event processing mode if we are decoding. Otherwise we are either
+ // already in it, or we will potentially switch to it later, after finishing
+ // other tasks.
+ if (state_ == kDecoding)
+ state_ = kIdle;
+
+ ProcessPendingEventsIfNeeded();
+}
+
+bool V4L2SliceVideoDecodeAccelerator::FinishEventProcessing() {
+ DCHECK_EQ(state_, kIdle);
+
+ state_ = kDecoding;
+ ScheduleDecodeBufferTaskIfNeeded();
+
+ return true;
+}
+
void V4L2SliceVideoDecodeAccelerator::ProcessPendingEventsIfNeeded() {
+ DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
// Process pending events, if any, in the correct order.
// We always first process the surface set change, as it is an internal
// event from the decoder and interleaving it with external requests would
// put the decoder in an undefined state.
- FinishSurfaceSetChangeIfNeeded();
+ using ProcessFunc = bool (V4L2SliceVideoDecodeAccelerator::*)();
+ const ProcessFunc process_functions[] = {
+ &V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChange,
+ &V4L2SliceVideoDecodeAccelerator::FinishFlush,
+ &V4L2SliceVideoDecodeAccelerator::FinishReset,
+ &V4L2SliceVideoDecodeAccelerator::FinishEventProcessing,
+ };
+
+ for (const auto& fn : process_functions) {
+ if (state_ != kIdle)
+ return;
- // Process external (client) requests.
- FinishFlushIfNeeded();
- FinishResetIfNeeded();
+ if (!(this->*fn)())
+ return;
+ }
}
void V4L2SliceVideoDecodeAccelerator::ReuseInputBuffer(int index) {
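The rewritten ProcessPendingEventsIfNeeded() walks a fixed-order table of pointers to member functions: each handler returns true when its event is absent or fully finished, and false when the event is still pending and cannot complete yet, which stops the walk until the next trigger. A standalone sketch of the same pattern with placeholder handler names:

// Standalone sketch of the ordered event-processing pattern used above;
// EventProcessor and the handler names are placeholders, not the VDA's.
class EventProcessor {
 public:
  void ProcessPendingEventsIfNeeded() {
    using Handler = bool (EventProcessor::*)();
    // Internal events first, then client-initiated ones, mirroring the
    // ordering requirement described in the comment in the hunk above.
    const Handler handlers[] = {
        &EventProcessor::FinishInternalEvent,
        &EventProcessor::FinishClientEvent,
    };
    for (const auto& handler : handlers) {
      // false means "pending but cannot complete yet" (e.g. buffers still
      // held by the device); stop here and retry on the next trigger.
      if (!(this->*handler)())
        return;
    }
  }

 private:
  bool FinishInternalEvent() { return true; }  // nothing pending in the sketch
  bool FinishClientEvent() { return true; }
};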
@@ -1050,7 +1080,6 @@ bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
OutputRecord& output_record = output_buffer_map_[index];
DCHECK(!output_record.at_device);
DCHECK(!output_record.at_client);
- DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
DCHECK_NE(output_record.picture_id, -1);
if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
@@ -1076,7 +1105,16 @@ bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
memset(qbuf_planes, 0, sizeof(qbuf_planes));
qbuf.index = index;
qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- qbuf.memory = V4L2_MEMORY_MMAP;
+ if (output_mode_ == Config::OutputMode::ALLOCATE) {
+ qbuf.memory = V4L2_MEMORY_MMAP;
+ } else {
+ qbuf.memory = V4L2_MEMORY_DMABUF;
+ DCHECK_EQ(output_planes_count_, output_record.dmabuf_fds.size());
+ for (size_t i = 0; i < output_record.dmabuf_fds.size(); ++i) {
+ DCHECK(output_record.dmabuf_fds[i].is_valid());
+ qbuf_planes[i].m.fd = output_record.dmabuf_fds[i].get();
+ }
+ }
qbuf.m.planes = qbuf_planes;
qbuf.length = output_planes_count_;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
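The hunk above selects the memory type per output mode and, in IMPORT mode, fills one dmabuf fd per plane before VIDIOC_QBUF. For reference, queueing a multi-planar CAPTURE buffer in DMABUF mode at the raw V4L2 API level looks roughly as follows; this is a generic sketch with error handling trimmed, not code from the patch, and it assumes one fd per plane with fds.size() <= VIDEO_MAX_PLANES:

// Generic sketch of VIDIOC_QBUF with V4L2_MEMORY_DMABUF; fds holds one dmabuf
// file descriptor per plane of the buffer being imported.
#include <cstring>
#include <vector>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

bool QueueDmabufCaptureBuffer(int device_fd,
                              unsigned int index,
                              const std::vector<int>& fds) {
  struct v4l2_buffer qbuf;
  struct v4l2_plane planes[VIDEO_MAX_PLANES];
  memset(&qbuf, 0, sizeof(qbuf));
  memset(planes, 0, sizeof(planes));
  qbuf.index = index;
  qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  qbuf.memory = V4L2_MEMORY_DMABUF;
  // In DMABUF mode each plane carries the fd of the buffer to import; in MMAP
  // mode (the ALLOCATE path above) the planes need no fds.
  for (size_t i = 0; i < fds.size(); ++i)
    planes[i].m.fd = fds[i];
  qbuf.m.planes = planes;
  qbuf.length = fds.size();
  return ioctl(device_fd, VIDIOC_QBUF, &qbuf) == 0;
}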
@@ -1302,22 +1340,22 @@ void V4L2SliceVideoDecodeAccelerator::DecodeBufferTask() {
void V4L2SliceVideoDecodeAccelerator::InitiateSurfaceSetChange() {
DVLOGF(2);
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
-
DCHECK_EQ(state_, kDecoding);
- state_ = kIdle;
DCHECK(!surface_set_change_pending_);
surface_set_change_pending_ = true;
-
- FinishSurfaceSetChangeIfNeeded();
+ NewEventPending();
}
-void V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChangeIfNeeded() {
+bool V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChange() {
DVLOGF(2);
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
- if (!surface_set_change_pending_ || !surfaces_at_device_.empty())
- return;
+ if (!surface_set_change_pending_)
+ return true;
+
+ if (!surfaces_at_device_.empty())
+ return false;
DCHECK_EQ(state_, kIdle);
DCHECK(decoder_display_queue_.empty());
@@ -1330,7 +1368,7 @@ void V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChangeIfNeeded() {
// Keep input queue running while we switch outputs.
if (!StopDevicePoll(true)) {
NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
+ return false;
}
// This will return only once all buffers are dismissed and destroyed.
@@ -1339,36 +1377,28 @@ void V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChangeIfNeeded() {
// after displaying.
if (!DestroyOutputs(true)) {
NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
+ return false;
}
if (!CreateOutputBuffers()) {
NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
- }
-
- if (!StartDevicePoll()) {
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
+ return false;
}
- DVLOGF(3) << "Surface set change finished";
-
surface_set_change_pending_ = false;
- state_ = kDecoding;
- ScheduleDecodeBufferTaskIfNeeded();
+  DVLOGF(3) << "Surface set change finished";
+ return true;
}
bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) {
DVLOGF(3);
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
- std::vector<EGLImageKHR> egl_images_to_destroy;
std::vector<int32_t> picture_buffers_to_dismiss;
if (output_buffer_map_.empty())
return true;
- for (auto output_record : output_buffer_map_) {
+ for (const auto& output_record : output_buffer_map_) {
DCHECK(!output_record.at_device);
if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
@@ -1446,6 +1476,18 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
DVLOGF(3);
DCHECK(child_task_runner_->BelongsToCurrentThread());
+ decoder_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask,
+ base::Unretained(this), buffers));
+}
+
+void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask(
+ const std::vector<media::PictureBuffer>& buffers) {
+ DVLOGF(3);
+ DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, kAwaitingPictureBuffers);
+
const uint32_t req_buffer_count = decoder_->GetRequiredNumOfPictures();
if (buffers.size() < req_buffer_count) {
@@ -1456,24 +1498,14 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
return;
}
- gfx::GLContext* gl_context = get_gl_context_cb_.Run();
- if (!gl_context || !make_context_current_cb_.Run()) {
- DLOG(ERROR) << "No GL context";
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
- }
-
- gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);
-
- // It's safe to manipulate all the buffer state here, because the decoder
- // thread is waiting on pictures_assigned_.
-
// Allocate the output buffers.
struct v4l2_requestbuffers reqbufs;
memset(&reqbufs, 0, sizeof(reqbufs));
reqbufs.count = buffers.size();
reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- reqbufs.memory = V4L2_MEMORY_MMAP;
+ reqbufs.memory =
+ (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP
+ : V4L2_MEMORY_DMABUF);
IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
if (reqbufs.count != buffers.size()) {
@@ -1482,11 +1514,12 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
return;
}
- output_buffer_map_.resize(buffers.size());
-
DCHECK(free_output_buffers_.empty());
+ DCHECK(output_buffer_map_.empty());
+ output_buffer_map_.resize(buffers.size());
for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
DCHECK(buffers[i].size() == coded_size_);
+ DCHECK_EQ(1u, buffers[i].texture_ids().size());
OutputRecord& output_record = output_buffer_map_[i];
DCHECK(!output_record.at_device);
@@ -1494,51 +1527,235 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
DCHECK_EQ(output_record.picture_id, -1);
+ DCHECK(output_record.dmabuf_fds.empty());
DCHECK_EQ(output_record.cleared, false);
- DCHECK_LE(1u, buffers[i].texture_ids().size());
- EGLImageKHR egl_image = device_->CreateEGLImage(
- egl_display_, gl_context->GetHandle(), buffers[i].texture_ids()[0],
- buffers[i].size(), i, output_format_fourcc_, output_planes_count_);
- if (egl_image == EGL_NO_IMAGE_KHR) {
- LOGF(ERROR) << "Could not create EGLImageKHR";
- // Ownership of EGLImages allocated in previous iterations of this loop
- // has been transferred to output_buffer_map_. After we error-out here
- // the destructor will handle their cleanup.
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
- }
-
- output_record.egl_image = egl_image;
output_record.picture_id = buffers[i].id();
- free_output_buffers_.push_back(i);
+ output_record.texture_id = buffers[i].texture_ids()[0];
+ // This will remain true until ImportBufferForPicture is called, either by
+ // the client, or by ourselves, if we are allocating.
+ output_record.at_client = true;
+ if (output_mode_ == Config::OutputMode::ALLOCATE) {
+ std::vector<base::ScopedFD> dmabuf_fds =
+ std::move(device_->GetDmabufsForV4L2Buffer(
+ i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE));
+ if (dmabuf_fds.empty()) {
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ auto passed_dmabuf_fds(base::WrapUnique(
+ new std::vector<base::ScopedFD>(std::move(dmabuf_fds))));
+ ImportBufferForPictureTask(output_record.picture_id,
+ std::move(passed_dmabuf_fds));
+ } // else we'll get triggered via ImportBufferForPicture() from client.
DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
}
- pictures_assigned_.Signal();
+ if (!StartDevicePoll()) {
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ // Put us in kIdle to allow further event processing.
+ // ProcessPendingEventsIfNeeded() will put us back into kDecoding after all
+ // other pending events are processed successfully.
+ state_ = kIdle;
+ ProcessPendingEventsIfNeeded();
}
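In ALLOCATE mode the VDA produces the dmabuf fds for its own MMAP buffers via V4L2Device::GetDmabufsForV4L2Buffer() and feeds them through the same ImportBufferForPictureTask() path as client-provided buffers. At the kernel API level such an export is typically done with VIDIOC_EXPBUF; the sketch below shows that generic mechanism and is an assumption about the helper, not code taken from this patch:

// Generic sketch of exporting per-plane dmabuf fds from an MMAP V4L2 buffer
// with VIDIOC_EXPBUF. Whether GetDmabufsForV4L2Buffer() does exactly this is
// an assumption; its implementation is not part of this patch.
#include <cstring>
#include <vector>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include "base/files/scoped_file.h"

std::vector<base::ScopedFD> ExportDmabufs(int device_fd,
                                          unsigned int buffer_index,
                                          size_t num_planes) {
  std::vector<base::ScopedFD> fds;
  for (size_t plane = 0; plane < num_planes; ++plane) {
    struct v4l2_exportbuffer expbuf;
    memset(&expbuf, 0, sizeof(expbuf));
    expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    expbuf.index = buffer_index;
    expbuf.plane = plane;
    expbuf.flags = O_CLOEXEC;
    if (ioctl(device_fd, VIDIOC_EXPBUF, &expbuf) != 0)
      return std::vector<base::ScopedFD>();  // empty result signals failure
    fds.emplace_back(expbuf.fd);  // ScopedFD takes ownership of the exported fd
  }
  return fds;
}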
-void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
- int32_t picture_buffer_id) {
+void V4L2SliceVideoDecodeAccelerator::CreateEGLImageFor(
+ size_t buffer_index,
+ int32_t picture_buffer_id,
+ std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds,
+ GLuint texture_id,
+ const gfx::Size& size,
+ uint32_t fourcc) {
+ DVLOGF(3) << "index=" << buffer_index;
DCHECK(child_task_runner_->BelongsToCurrentThread());
- DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
- if (!make_context_current_cb_.Run()) {
- LOGF(ERROR) << "could not make context current";
+ if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
+ DLOG(ERROR) << "GL callbacks required for binding to EGLImages";
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ gfx::GLContext* gl_context = get_gl_context_cb_.Run();
+ if (!gl_context || !make_context_current_cb_.Run()) {
+ DLOG(ERROR) << "No GL context";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
}
- EGLSyncKHR egl_sync =
- eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
- if (egl_sync == EGL_NO_SYNC_KHR) {
- LOGF(ERROR) << "eglCreateSyncKHR() failed";
+ gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);
+
+ EGLImageKHR egl_image =
+ device_->CreateEGLImage(egl_display_, gl_context->GetHandle(), texture_id,
+ size, buffer_index, fourcc, *passed_dmabuf_fds);
+ if (egl_image == EGL_NO_IMAGE_KHR) {
+ LOGF(ERROR) << "Could not create EGLImageKHR,"
+ << " index=" << buffer_index << " texture_id=" << texture_id;
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
}
- std::unique_ptr<EGLSyncKHRRef> egl_sync_ref(
- new EGLSyncKHRRef(egl_display_, egl_sync));
+ decoder_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&V4L2SliceVideoDecodeAccelerator::AssignEGLImage,
+ base::Unretained(this), buffer_index, picture_buffer_id,
+ egl_image, base::Passed(&passed_dmabuf_fds)));
+}
+
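CreateEGLImageFor() runs on the child (GL) thread and hands the dmabuf fds to V4L2Device::CreateEGLImage(), whose signature now takes them. Creating an EGLImage from dmabufs is generally done with the EGL_EXT_image_dma_buf_import extension; the single-plane sketch below only illustrates that mechanism and is not taken from the V4L2Device implementation (a multi-planar format such as NV12 needs per-plane fd/offset/pitch attributes):

// Generic single-plane sketch of EGL_EXT_image_dma_buf_import; the format and
// attribute list are illustrative, not what V4L2Device::CreateEGLImage uses.
#define EGL_EGLEXT_PROTOTYPES
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <drm_fourcc.h>

EGLImageKHR CreateImageFromDmabuf(EGLDisplay display,
                                  int dmabuf_fd,
                                  EGLint width,
                                  EGLint height,
                                  EGLint stride) {
  const EGLint attrs[] = {
      EGL_WIDTH,                     width,
      EGL_HEIGHT,                    height,
      EGL_LINUX_DRM_FOURCC_EXT,      DRM_FORMAT_ARGB8888,
      EGL_DMA_BUF_PLANE0_FD_EXT,     dmabuf_fd,
      EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0,
      EGL_DMA_BUF_PLANE0_PITCH_EXT,  stride,
      EGL_NONE,
  };
  // dmabuf imports take no client buffer and must pass EGL_NO_CONTEXT.
  return eglCreateImageKHR(display, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT,
                           static_cast<EGLClientBuffer>(nullptr), attrs);
}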
+void V4L2SliceVideoDecodeAccelerator::AssignEGLImage(
+ size_t buffer_index,
+ int32_t picture_buffer_id,
+ EGLImageKHR egl_image,
+ std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) {
+ DVLOGF(3) << "index=" << buffer_index;
+ DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+ // It's possible that while waiting for the EGLImages to be allocated and
+ // assigned, we have already decoded more of the stream and saw another
+ // resolution change. This is a normal situation, in such a case either there
+ // is no output record with this index awaiting an EGLImage to be assigned to
+ // it, or the record is already updated to use a newer PictureBuffer and is
+ // awaiting an EGLImage associated with a different picture_buffer_id. If so,
+ // just discard this image, we will get the one we are waiting for later.
+ if (buffer_index >= output_buffer_map_.size() ||
+ output_buffer_map_[buffer_index].picture_id != picture_buffer_id) {
+ DVLOGF(3) << "Picture set already changed, dropping EGLImage";
+ child_task_runner_->PostTask(
+ FROM_HERE, base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage),
+ device_, egl_display_, egl_image));
+ return;
+ }
+
+ OutputRecord& output_record = output_buffer_map_[buffer_index];
+ DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
+ DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
+ DCHECK(!output_record.at_client);
+ DCHECK(!output_record.at_device);
+
+ output_record.egl_image = egl_image;
+ if (output_mode_ == Config::OutputMode::IMPORT) {
+ DCHECK(output_record.dmabuf_fds.empty());
+ output_record.dmabuf_fds = std::move(*passed_dmabuf_fds);
+ }
+
+ DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
+ buffer_index),
+ 0);
+ free_output_buffers_.push_back(buffer_index);
+ ScheduleDecodeBufferTaskIfNeeded();
+}
+
+void V4L2SliceVideoDecodeAccelerator::ImportBufferForPicture(
+ int32_t picture_buffer_id,
+ const std::vector<gfx::GpuMemoryBufferHandle>& gpu_memory_buffer_handles) {
+ DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+ auto passed_dmabuf_fds(base::WrapUnique(new std::vector<base::ScopedFD>()));
+ for (const auto& handle : gpu_memory_buffer_handles) {
+ int fd = -1;
+#if defined(USE_OZONE)
+ fd = handle.native_pixmap_handle.fd.fd;
+#endif
+ DCHECK_NE(fd, -1);
+ passed_dmabuf_fds->push_back(base::ScopedFD(fd));
+ }
+
+ if (output_mode_ != Config::OutputMode::IMPORT) {
+ LOGF(ERROR) << "Cannot import in non-import mode";
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ decoder_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask,
+ base::Unretained(this), picture_buffer_id,
+ base::Passed(&passed_dmabuf_fds)));
+}
+
+void V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask(
+ int32_t picture_buffer_id,
+ std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) {
+ DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
+ DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+ const auto iter =
+ std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(),
+ [picture_buffer_id](const OutputRecord& output_record) {
+ return output_record.picture_id == picture_buffer_id;
+ });
+ if (iter == output_buffer_map_.end()) {
+ LOGF(ERROR) << "Invalid picture_buffer_id=" << picture_buffer_id;
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ if (!iter->at_client) {
+ LOGF(ERROR) << "Cannot import buffer that not owned by client";
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ size_t index = iter - output_buffer_map_.begin();
+ DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
+ index),
+ 0);
+
+ DCHECK(!iter->at_device);
+ iter->at_client = false;
+ if (iter->texture_id != 0) {
+ if (iter->egl_image != EGL_NO_IMAGE_KHR) {
+ child_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_,
+ egl_display_, iter->egl_image));
+ }
+
+ child_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&V4L2SliceVideoDecodeAccelerator::CreateEGLImageFor,
+ weak_this_, index, picture_buffer_id,
+ base::Passed(&passed_dmabuf_fds), iter->texture_id,
+ coded_size_, output_format_fourcc_));
+ } else {
+ // No need for an EGLImage, start using this buffer now.
+ DCHECK_EQ(output_planes_count_, passed_dmabuf_fds->size());
+ iter->dmabuf_fds.swap(*passed_dmabuf_fds);
+ free_output_buffers_.push_back(index);
+ ScheduleDecodeBufferTaskIfNeeded();
+ }
+}
+
+void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
+ int32_t picture_buffer_id) {
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
+ DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
+
+ std::unique_ptr<EGLSyncKHRRef> egl_sync_ref;
+
+ if (!make_context_current_cb_.is_null()) {
+ if (!make_context_current_cb_.Run()) {
+ LOGF(ERROR) << "could not make context current";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ EGLSyncKHR egl_sync =
+ eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
+ if (egl_sync == EGL_NO_SYNC_KHR) {
+ LOGF(ERROR) << "eglCreateSyncKHR() failed";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ egl_sync_ref.reset(new EGLSyncKHRRef(egl_display_, egl_sync));
+ }
+
decoder_thread_task_runner_->PostTask(
FROM_HERE,
base::Bind(&V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask,
@@ -1575,9 +1792,12 @@ void V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask(
DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
DCHECK(!output_record.at_device);
output_record.at_client = false;
- output_record.egl_sync = egl_sync_ref->egl_sync;
- // Take ownership of the EGLSync.
- egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
+ if (egl_sync_ref) {
+ output_record.egl_sync = egl_sync_ref->egl_sync;
+ // Take ownership of the EGLSync.
+ egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
+ }
+
surfaces_at_display_.erase(it);
}
@@ -1611,10 +1831,6 @@ void V4L2SliceVideoDecodeAccelerator::InitiateFlush() {
DVLOGF(3);
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
- DCHECK(!decoder_flushing_);
- DCHECK_EQ(state_, kDecoding);
- state_ = kIdle;
-
// This will trigger output for all remaining surfaces in the decoder.
// However, not all of them may be decoded yet (they would be queued
// in hardware then).
@@ -1627,20 +1843,20 @@ void V4L2SliceVideoDecodeAccelerator::InitiateFlush() {
// Put the decoder in an idle state, ready to resume.
decoder_->Reset();
+ DCHECK(!decoder_flushing_);
decoder_flushing_ = true;
-
- decoder_thread_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&V4L2SliceVideoDecodeAccelerator::FinishFlushIfNeeded,
- base::Unretained(this)));
+ NewEventPending();
}
-void V4L2SliceVideoDecodeAccelerator::FinishFlushIfNeeded() {
+bool V4L2SliceVideoDecodeAccelerator::FinishFlush() {
DVLOGF(3);
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
- if (!decoder_flushing_ || !surfaces_at_device_.empty())
- return;
+ if (!decoder_flushing_)
+ return true;
+
+ if (!surfaces_at_device_.empty())
+ return false;
DCHECK_EQ(state_, kIdle);
@@ -1658,14 +1874,13 @@ void V4L2SliceVideoDecodeAccelerator::FinishFlushIfNeeded() {
SendPictureReady();
+ decoder_flushing_ = false;
+ DVLOGF(3) << "Flush finished";
+
child_task_runner_->PostTask(FROM_HERE,
base::Bind(&Client::NotifyFlushDone, client_));
- decoder_flushing_ = false;
-
- DVLOGF(3) << "Flush finished";
- state_ = kDecoding;
- ScheduleDecodeBufferTaskIfNeeded();
+ return true;
}
void V4L2SliceVideoDecodeAccelerator::Reset() {
@@ -1688,28 +1903,27 @@ void V4L2SliceVideoDecodeAccelerator::ResetTask() {
return;
}
- DCHECK_EQ(state_, kDecoding);
- state_ = kIdle;
-
// Put the decoder in an idle state, ready to resume.
decoder_->Reset();
- decoder_resetting_ = true;
-
// Drop all remaining inputs.
decoder_current_bitstream_buffer_.reset();
while (!decoder_input_queue_.empty())
decoder_input_queue_.pop();
- FinishResetIfNeeded();
+ decoder_resetting_ = true;
+ NewEventPending();
}
-void V4L2SliceVideoDecodeAccelerator::FinishResetIfNeeded() {
+bool V4L2SliceVideoDecodeAccelerator::FinishReset() {
DVLOGF(3);
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
- if (!decoder_resetting_ || !surfaces_at_device_.empty())
- return;
+ if (!decoder_resetting_)
+ return true;
+
+ if (!surfaces_at_device_.empty())
+ return false;
DCHECK_EQ(state_, kIdle);
DCHECK(!decoder_flushing_);
@@ -1733,14 +1947,12 @@ void V4L2SliceVideoDecodeAccelerator::FinishResetIfNeeded() {
}
decoder_resetting_ = false;
+ DVLOGF(3) << "Reset finished";
child_task_runner_->PostTask(FROM_HERE,
base::Bind(&Client::NotifyResetDone, client_));
- DVLOGF(3) << "Reset finished";
-
- state_ = kDecoding;
- ScheduleDecodeBufferTaskIfNeeded();
+ return true;
}
void V4L2SliceVideoDecodeAccelerator::SetErrorState(Error error) {
@@ -2460,7 +2672,6 @@ void V4L2SliceVideoDecodeAccelerator::OutputSurface(
DCHECK(!output_record.at_client);
DCHECK(!output_record.at_device);
- DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
DCHECK_NE(output_record.picture_id, -1);
output_record.at_client = true;
@@ -2566,6 +2777,11 @@ bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
return true;
}
+media::VideoPixelFormat V4L2SliceVideoDecodeAccelerator::GetOutputFormat()
+ const {
+ return V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_);
+}
+
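GetOutputFormat() lets an importing client learn which pixel format to allocate its buffers in before calling ImportBufferForPicture(). A hypothetical usage sketch; the vda pointer and the surrounding client code are assumptions, not part of this patch:

// Hypothetical client-side usage; "vda" is assumed to point at a
// media::VideoDecodeAccelerator configured with Config::OutputMode::IMPORT.
media::VideoPixelFormat format = vda->GetOutputFormat();
// For example, an output_format_fourcc_ of V4L2_PIX_FMT_NV12 would map to
// media::PIXEL_FORMAT_NV12; the client allocates dmabuf-backed buffers in
// that format and hands them back via ImportBufferForPicture().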
// static
media::VideoDecodeAccelerator::SupportedProfiles
V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() {
