Index: content/common/gpu/media/exynos_video_decode_accelerator.cc |
diff --git a/content/common/gpu/media/exynos_video_decode_accelerator.cc b/content/common/gpu/media/exynos_video_decode_accelerator.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..3d44b902b20c47f8faca6ff557836b0bab8b59e1 |
--- /dev/null |
+++ b/content/common/gpu/media/exynos_video_decode_accelerator.cc |
@@ -0,0 +1,2152 @@ |
+// Copyright (c) 2012 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include <dlfcn.h> |
+#include <errno.h> |
+#include <fcntl.h> |
+#include <linux/videodev2.h> |
+#include <sys/epoll.h> |
+#include <sys/ioctl.h> |
+#include <sys/mman.h> |
+ |
+#include "base/bind.h" |
+#include "base/message_loop.h" |
+#include "base/message_loop_proxy.h" |
+#include "base/shared_memory.h" |
+#include "content/common/gpu/media/exynos_video_decode_accelerator.h" |
+#include "ui/gl/gl_bindings.h" |
+#include "ui/gl/gl_context.h" |
+#include "ui/gl/gl_context_egl.h" |
+#include "ui/gl/gl_surface_egl.h" |
+ |
+namespace content { |
+ |
+#define NOTIFY_ERROR(x) \ |
+ do { \ |
+ LOG(ERROR) << "calling NotifyError(): " << x; \ |
+ NotifyError(x); \ |
+ } while (0) |
+ |
+#define EXYNOS_MFC_DEVICE "/dev/mfc-dec" |
+#define EXYNOS_GSC_DEVICE "/dev/gsc1" |
+#define EXYNOS_MALI_DRIVER "libmali.so" |
+ |
+static void* libmali_handle = NULL; |
+static EGLBoolean(*mali_egl_image_get_buffer_ext_phandle) |
+ (EGLImageKHR, EGLint*, void*) = NULL; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
style: here and below, opening paren belongs on pr
sheu
2012/11/01 02:16:08
Done.
|
+static EGLSyncKHR(*egl_create_sync_khr) |
+ (EGLDisplay, EGLenum, const EGLint*) = NULL; |
+static EGLBoolean(*egl_destroy_sync_khr) |
+ (EGLDisplay, EGLSyncKHR) = NULL; |
+static EGLint(*egl_client_wait_sync_khr) |
+ (EGLDisplay, EGLSyncKHR, EGLint, EGLTimeKHR) = NULL; |
+ |
+ExynosVideoDecodeAccelerator::BitstreamBufferRecord::BitstreamBufferRecord( |
+ base::SharedMemory* shm, size_t size, int32 input_id) |
+ : shm(shm), |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
self-assign?
(here and below)
sheu
2012/11/01 02:16:08
Not actually an error: the outer resolves in class
|
+ size(size), |
+ input_id(input_id) { |
+} |
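A minimal standalone C++ sketch (not part of the patch) of the rule behind the "self-assign?" exchange above: in a mem-initializer list, the name before the parentheses is looked up as a class member, while the name inside the parentheses resolves to the constructor parameter, so shm(shm) initializes the member from the argument rather than self-assigning.

#include <cassert>

struct Record {
  // Mirrors the pattern above: member "size" is initialized from the
  // identically named constructor parameter.
  explicit Record(int size) : size(size) {}
  int size;
};

int main() {
  Record record(42);
  assert(record.size == 42);  // The member picked up the parameter's value.
  return 0;
}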
+ |
+ExynosVideoDecodeAccelerator::MfcInputRecord::MfcInputRecord() { |
+ at_device = false; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
here and below, use initializer lists for stuff th
sheu
2012/11/01 02:16:08
I figured this was more uniform-looking, but if yo
Ami GONE FROM CHROMIUM
2012/11/02 17:57:06
FWIW, initializer lists have the benefit over expl
|
+ offset = NULL; |
+ length = 0; |
+ bytes_used = 0; |
+ input_id = -1; |
+} |
+ |
+ExynosVideoDecodeAccelerator::MfcOutputRecord::MfcOutputRecord() { |
+ at_device = false; |
+ bytes_used[0] = 0; |
+ bytes_used[1] = 0; |
+ offset[0] = NULL; |
+ offset[1] = NULL; |
+ length[0] = 0; |
+ length[1] = 0; |
+ input_id = -1; |
+} |
+ |
+ExynosVideoDecodeAccelerator::GscInputRecord::GscInputRecord() { |
+ at_device = false; |
+ mfc_output = -1; |
+} |
+ |
+ExynosVideoDecodeAccelerator::GscOutputRecord::GscOutputRecord() { |
+ at_device = false; |
+ at_client = false; |
+ fd = -1; |
+ egl_image = EGL_NO_IMAGE_KHR; |
+ egl_sync = EGL_NO_SYNC_KHR; |
+ picture_id = -1; |
+} |
+ |
+ExynosVideoDecodeAccelerator::EGLImageKHRArrayRef::EGLImageKHRArrayRef( |
+ EGLDisplay egl_display, EGLImageKHR egl_images[], int egl_image_fds[], |
+ int egl_images_count) |
+ : egl_display(egl_display), |
+ egl_images(egl_images), |
+ egl_image_fds(egl_image_fds), |
+ egl_images_count(egl_images_count) { |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
ditto
|
+} |
+ |
+ExynosVideoDecodeAccelerator::EGLImageKHRArrayRef::~EGLImageKHRArrayRef() { |
+ DCHECK_EQ(egl_images != NULL, egl_image_fds != NULL); |
+ if (egl_images == NULL) |
+ return; |
+ |
+ for (int i = 0; i < egl_images_count; i += 1) { |
+ if (egl_images[i] != EGL_NO_IMAGE_KHR) |
+ eglDestroyImageKHR(egl_display, egl_images[i]); |
+ if (egl_image_fds[i] != -1) |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
If it possible for the test here and the one two l
sheu
2012/11/01 02:16:08
It can happen if we fail in AssignPictureBuffers w
|
+ close(egl_image_fds[i]); |
+ } |
+} |
+ |
+ExynosVideoDecodeAccelerator::EGLSyncKHRRef::EGLSyncKHRRef( |
+ EGLDisplay egl_display, EGLSyncKHR egl_sync) |
+ : egl_display(egl_display), |
+ egl_sync(egl_sync) { |
+} |
+ |
+ExynosVideoDecodeAccelerator::EGLSyncKHRRef::~EGLSyncKHRRef() { |
+ if (egl_sync != EGL_NO_SYNC_KHR) |
+ (*egl_destroy_sync_khr)(egl_display, egl_sync); |
+} |
+ |
+ExynosVideoDecodeAccelerator::ExynosVideoDecodeAccelerator( |
+ gfx::GLContext* gl_context, |
+ Client* client, |
+ const base::Callback<bool(void)>& make_context_current) |
+ : child_message_loop_proxy_(base::MessageLoopProxy::current()), |
+ weak_this_(base::AsWeakPtr(this)), |
+ client_ptr_factory_(client), |
+ client_(client_ptr_factory_.GetWeakPtr()), |
+ decoder_thread_("ExynosDecoderThread"), |
+ decoder_state_(kUninitialized), |
+ decoder_current_bitstream_buffer_(NULL), |
+ decoder_current_input_buffer_(-1), |
+ decoder_decode_buffer_tasks_scheduled_(0), |
+ decoder_frames_inflight_(0), |
+ decoder_frames_at_client_(0), |
+ decoder_flush_notify_requested_(false), |
+ mfc_fd_(-1), |
+ mfc_fd_closer_(&mfc_fd_), |
+ mfc_input_streamon_(false), |
+ mfc_input_buffer_count_(0), |
+ mfc_output_streamon_(false), |
+ mfc_output_buffer_count_(0), |
+ mfc_output_buffer_pixelformat_(0), |
+ gsc_fd_(-1), |
+ gsc_fd_closer_(&gsc_fd_), |
+ gsc_input_streamon_(false), |
+ gsc_input_buffer_count_(0), |
+ gsc_output_streamon_(false), |
+ gsc_output_buffer_count_(0), |
+ gsc_output_buffer_prepared_count_(0), |
+ gsc_output_buffer_queued_count_(0), |
+ frame_buffer_size_(0, 0), |
+ device_thread_("ExynosDeviceThread"), |
+ gl_context_(gl_context), |
+ make_context_current_(make_context_current), |
+ egl_context_(EGL_NO_CONTEXT), |
+ egl_display_(EGL_NO_DISPLAY), |
+ video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN) { |
+} |
+ |
+ExynosVideoDecodeAccelerator::~ExynosVideoDecodeAccelerator() { |
+ Destroy(); |
+ |
+ // These maps have members that should be manually destroyed, e.g. file |
+ // descriptors, mmap() segments, etc. |
+ DCHECK(mfc_input_buffer_map_.empty()); |
+ DCHECK(mfc_output_buffer_map_.empty()); |
+ DCHECK(gsc_input_buffer_map_.empty()); |
+ DCHECK(gsc_output_buffer_map_.empty()); |
+} |
+ |
+bool ExynosVideoDecodeAccelerator::Initialize( |
+ media::VideoCodecProfile profile) { |
+ DVLOG(3) << "Initialize()"; |
+ DCHECK_EQ(child_message_loop_proxy_, base::MessageLoopProxy::current()); |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
here and below,
DCHECK_EQ(child_message_loop_prox
sheu
2012/11/01 02:16:08
Done.
|
+ DCHECK_EQ(decoder_state_, kUninitialized); |
+ int ret; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
style: declare vars as near to first use as possib
sheu
2012/11/01 02:16:08
Done.
|
+ const __u32 caps_required = |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
consts get kCapsRequired-style naming
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
Please move south to first use
sheu
2012/11/01 02:16:08
Done. Used to be a problem when we had gotos :-)
|
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE | |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
why do we care about video /capture/ for this file
sheu
2012/11/01 02:16:08
Hehe. Blame V4L2.
VIDEO_OUTPUT == decoder input
Ami GONE FROM CHROMIUM
2012/11/02 17:57:06
Like I said, gonna let posciak@ review for v4l2 co
|
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE | |
+ V4L2_CAP_STREAMING; |
+ |
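For readers puzzled by the same thing as the reviewer above: in the V4L2 memory-to-memory API the queue names describe the application's role, so the application outputs compressed data to the decoder on the VIDEO_OUTPUT queue and captures decoded frames from the VIDEO_CAPTURE queue. A standalone sketch of the capability check under that naming (the device path and error handling are illustrative only, not taken from the patch):

#include <fcntl.h>
#include <linux/videodev2.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main() {
  // Hypothetical device node; the patch opens /dev/mfc-dec and /dev/gsc1.
  int fd = open("/dev/video0", O_RDWR | O_NONBLOCK);
  if (fd == -1) {
    perror("open");
    return 1;
  }
  struct v4l2_capability caps;
  memset(&caps, 0, sizeof(caps));
  if (ioctl(fd, VIDIOC_QUERYCAP, &caps) == 0) {
    // OUTPUT_MPLANE: the queue we feed (decoder input).
    // CAPTURE_MPLANE: the queue we drain (decoder output).
    int is_m2m = (caps.capabilities & V4L2_CAP_VIDEO_OUTPUT_MPLANE) &&
                 (caps.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) &&
                 (caps.capabilities & V4L2_CAP_STREAMING);
    printf("multi-planar mem-to-mem capable: %d\n", is_m2m);
  }
  close(fd);
  return 0;
}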
+ switch (profile) { |
+ case media::H264PROFILE_BASELINE: |
+ DVLOG(2) << "Initialize(): profile H264PROFILE_BASELINE"; |
+ break; |
+ case media::H264PROFILE_MAIN: |
+ DVLOG(2) << "Initialize(): profile H264PROFILE_MAIN"; |
+ break; |
+ case media::H264PROFILE_HIGH: |
+ DVLOG(2) << "Initialize(): profile H264PROFILE_HIGH"; |
+ break; |
+ case media::VP8PROFILE_MAIN: |
+ DVLOG(2) << "Initialize(): profile VP8PROFILE_MAIN"; |
+ break; |
+ default: |
+ DLOG(ERROR) << "Initialize(): unsupported profile=" << profile; |
+ return false; |
+ }; |
+ video_profile_ = profile; |
+ |
+ gfx::GLContextEGL* context_egl = static_cast<gfx::GLContextEGL*>(gl_context_); |
+ static bool sandbox_initialized = PostSandboxInitialization(); |
+ if (!sandbox_initialized) { |
+ DLOG(ERROR) << "Initialize(): PostSandboxInitialization() failed"; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
nit: if failure was in PreSandbox msg is slightly
sheu
2012/11/01 02:16:08
It'll report what failed, which should be fairly o
|
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ |
+ egl_context_ = reinterpret_cast<EGLContext>(context_egl->GetHandle()); |
+ if (egl_context_ == EGL_NO_CONTEXT) { |
+ DLOG(ERROR) << "Initialize(): could not set EGLContext"; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
s/set/get/?
sheu
2012/11/01 02:16:08
Done.
|
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ egl_display_ = gfx::GLSurfaceEGL::GetHardwareDisplay(); |
+ if (egl_display_ == EGL_NO_DISPLAY) { |
+ DLOG(ERROR) << "Initialize(): could not set EGLDisplay"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
wouldn't life be nicer if NOTIFY_ERROR also took c
sheu
2012/11/01 02:16:08
Removed CleanupTask (see .h).
The NOTIFY_ERROR s
|
+ } |
+ |
+ // Open the video devices. |
+ DVLOG(2) << "Initialize(): opening MFC device: " << EXYNOS_MFC_DEVICE; |
+ errno = 0; |
+ mfc_fd_ = open(EXYNOS_MFC_DEVICE, O_RDWR | O_NONBLOCK); |
+ if (mfc_fd_ == -1) { |
+ DPLOG(ERROR) << |
+ "Initialize(): could not open MFC device: " << EXYNOS_MFC_DEVICE; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ DVLOG(2) << "Initialize(): opening GSC device: " << EXYNOS_GSC_DEVICE; |
+ errno = 0; |
+ gsc_fd_ = open(EXYNOS_GSC_DEVICE, O_RDWR | O_NONBLOCK); |
+ if (gsc_fd_ == -1) { |
+ DPLOG(ERROR) << |
+ "Initialize(): could not open GSC device: " << EXYNOS_GSC_DEVICE; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ |
+ // Capabilities check. |
+ struct v4l2_capability caps; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_QUERYCAP, &caps); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } else if ((caps.capabilities & caps_required) != caps_required) { |
+ DLOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP" |
+ ", caps check failed: 0x" << std::hex << caps.capabilities; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_QUERYCAP, &caps); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } else if ((caps.capabilities & caps_required) != caps_required) { |
+ DLOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP" |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
errors from from MFC & GSC fd's will look the same
sheu
2012/11/01 02:16:08
Yeah, I'm relying on line number here, but I don't
|
+ ", caps check failed: 0x" << std::hex << caps.capabilities; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ |
+ // Some random ioctls that Exynos requires. |
+ struct v4l2_control control; |
+ memset(&control, 0, sizeof(control)); |
+ control.id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY; // also VP8 |
+ control.value = 8; // Magic number. |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
random/magic doco'd somewhere?
sheu
2012/11/01 02:16:08
Updated comment. It's from Samsung. :-)
|
+ errno = 0; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
here and elsewhere, is the errno=0 really required
sheu
2012/11/01 02:16:08
Paranoia I guess. I like to initialize to known v
|
+ ret = ioctl(mfc_fd_, VIDIOC_S_CTRL, &control); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "Initialize(): ioctl() failed: " |
+ "V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ |
+ if (!make_context_current_.Run()) { |
+ DLOG(ERROR) << "Initialize(): could not make context current"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ |
+ if (!CreateMfcInputBuffers()) { |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ |
+ // MFC output format has to be setup before streaming starts. |
+ struct v4l2_format format; |
+ memset(&format, 0, sizeof(format)); |
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12MT_16X16; |
+ ret = ioctl(mfc_fd_, VIDIOC_S_FMT, &format); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_S_FMT"; |
+ return false; |
+ } |
+ |
+ if (!decoder_thread_.Start()) { |
+ DLOG(ERROR) << "Initialize(): decoder thread failed to start"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ |
+ decoder_state_ = kInitialized; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
didn't somebody tell me the child thread only touc
sheu
2012/11/01 02:16:08
Heh. Well, at this point (until return of this fu
|
+ child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
+ &Client::NotifyInitializeDone, client_)); |
+ return true; |
+} |
+ |
+void ExynosVideoDecodeAccelerator::Decode( |
+ const media::BitstreamBuffer& bitstream_buffer) { |
+ DVLOG(1) << "Decode(): input_id=" << bitstream_buffer.id() << |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
style: << goes on next line (even though it's an o
sheu
2012/11/01 02:16:08
Done.
|
+ ", size=" << bitstream_buffer.size(); |
+ DCHECK_EQ(child_message_loop_proxy_, base::MessageLoopProxy::current()); |
+ |
+ scoped_ptr<BitstreamBufferRecord> |
+ bitstream_record(new BitstreamBufferRecord( |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
bitstream_record could go on previous line (and de
sheu
2012/11/01 02:16:08
Done.
|
+ new base::SharedMemory(bitstream_buffer.handle(), true), |
+ bitstream_buffer.size(), bitstream_buffer.id())); |
+ if (!bitstream_record->shm->Map(bitstream_buffer.size())) { |
+ DLOG(ERROR) << "Decode(): could not map bitstream_buffer"; |
+ NOTIFY_ERROR(UNREADABLE_INPUT); |
+ return; |
+ } |
+ DVLOG(3) << "Decode(): mapped to addr=" << bitstream_record->shm->memory(); |
+ |
+ // DecodeTask() will take care of running a DecodeBufferTask(). |
+ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::DecodeTask, base::Unretained(this), |
+ base::Passed(&bitstream_record))); |
+} |
+ |
+void ExynosVideoDecodeAccelerator::AssignPictureBuffers( |
+ const std::vector<media::PictureBuffer>& buffers) { |
+ DVLOG(3) << "AssignPictureBuffers(): buffer_count=" << buffers.size(); |
+ DCHECK_EQ(child_message_loop_proxy_, base::MessageLoopProxy::current()); |
+ |
+ if (!make_context_current_.Run()) { |
+ DLOG(ERROR) << "AssignPictureBuffers(): could not make context current"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+ DCHECK_EQ(buffers.size(), (size_t)gsc_output_buffer_count_); |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
style: no c-style casts; reinterpret_cast<>, unfor
sheu
2012/11/01 02:16:08
Done.
|
+ scoped_ptr<EGLImageKHRArrayRef> egl_images_ref( |
+ new EGLImageKHRArrayRef( |
+ egl_display_, new EGLImageKHR[buffers.size()], |
+ new int[buffers.size()], buffers.size())); |
+ for (int i = 0; i < egl_images_ref->egl_images_count; i += 1) { |
+ egl_images_ref->egl_images[i] = EGL_NO_IMAGE_KHR; |
+ egl_images_ref->egl_image_fds[i] = -1; |
+ } |
+ |
+ const static EGLint image_attrs[] = { |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
Should this be using Gles2TextureToEglImageTransla
sheu
2012/11/01 02:16:08
My original version of EVDA did; I removed it to r
|
+ EGL_IMAGE_PRESERVED_KHR, 0, |
+ EGL_NONE, |
+ }; |
+ Display* x_display = base::MessagePumpForUI::GetDefaultXDisplay(); |
+ glActiveTexture(GL_TEXTURE0); |
+ for (int i = 0; i < egl_images_ref->egl_images_count; i += 1) { |
+ EGLImageKHR egl_image; |
+ int fd, ret; |
+ // Create the X pixmap and then create an EGLImageKHR from it, so we can |
+ // get dma_buf backing. |
+ Pixmap pixmap = XCreatePixmap(x_display, RootWindow(x_display, 0), |
+ buffers[i].size().width(), buffers[i].size().height(), 32); |
+ if (!pixmap) { |
+ DLOG(ERROR) << "AssignPictureBuffers(): could not create X pixmap"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ glBindTexture(GL_TEXTURE_2D, buffers[i].texture_id()); |
+ egl_image = eglCreateImageKHR(egl_display_, EGL_NO_CONTEXT, |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
style: don't mix arg styles; either everything's +
sheu
2012/11/01 02:16:08
Done.
|
+ EGL_NATIVE_PIXMAP_KHR, (EGLClientBuffer)pixmap, image_attrs); |
+ // We can free the X pixmap immediately -- according to the |
+ // EGL_KHR_image_base spec, the backing storage does not go away until the |
+ // last referencing EGLImage is destroyed. |
+ XFreePixmap(x_display, pixmap); |
+ if (egl_image == EGL_NO_IMAGE_KHR) { |
+ DLOG(ERROR) << "AssignPictureBuffers(): could not create EGLImageKHR"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ egl_images_ref->egl_images[i] = egl_image; |
+ ret = (*mali_egl_image_get_buffer_ext_phandle)( |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
(*foo)(...) == foo(...)
and the latter is less mes
sheu
2012/11/01 02:16:08
Another bit of pedantry -- it's a function pointer
|
+ egl_images_ref->egl_images[i], NULL, &fd); |
+ if (!ret) { |
+ DLOG(ERROR) << |
+ "AssignPictureBuffers(): could not get EGLImageKHR dmabuf fd"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ egl_images_ref->egl_image_fds[i] = fd; |
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image); |
+ } |
+ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::AssignPictureBuffersTask, |
+ base::Unretained(this), base::Passed(&egl_images_ref))); |
+} |
+ |
+void ExynosVideoDecodeAccelerator::ReusePictureBuffer(int32 picture_buffer_id) { |
+ DVLOG(3) << "ReusePictureBuffer(): picture_buffer_id=" << picture_buffer_id; |
+ // Must be run on API thread, as we're inserting a sync in the EGL context. |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
s/API/Child/
but I think this comment really appli
sheu
2012/11/01 02:16:08
Updated comment, but I think it still makes sense.
|
+ DCHECK_EQ(child_message_loop_proxy_, base::MessageLoopProxy::current()); |
+ |
+ if (!make_context_current_.Run()) { |
+ DLOG(ERROR) << "ReusePictureBuffer(): could not make context current"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+ EGLSyncKHR egl_sync; |
+ egl_sync = (*egl_create_sync_khr)(egl_display_, EGL_SYNC_FENCE_KHR, NULL); |
+ if (egl_sync == EGL_NO_SYNC_KHR) { |
+ DLOG(ERROR) << "ReusePictureBuffer(): eglCreateSyncKHR() failed"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+ scoped_ptr<EGLSyncKHRRef> egl_sync_ref(new EGLSyncKHRRef( |
+ egl_display_, egl_sync)); |
+ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::ReusePictureBufferTask, |
+ base::Unretained(this), picture_buffer_id, base::Passed(&egl_sync_ref))); |
+ return; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
drop
sheu
2012/11/01 02:16:08
Done.
|
+} |
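For context on the ReusePictureBuffer()/EnqueueGsc() handshake: the fence is inserted here, on the child thread where the client's GL context is current, and is only waited on later (in EnqueueGsc()) before the buffer is handed back to GSC. A rough sketch of that pattern, using eglGetProcAddress() rather than the patch's dlsym() lookups; it assumes a valid, initialized EGLDisplay and EGL_KHR_fence_sync support:

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <cstddef>

EGLSyncKHR InsertFence(EGLDisplay dpy) {
  PFNEGLCREATESYNCKHRPROC create_sync =
      reinterpret_cast<PFNEGLCREATESYNCKHRPROC>(
          eglGetProcAddress("eglCreateSyncKHR"));
  // The fence signals once all GL commands issued so far in this context
  // (i.e. the client's reads of the texture) have completed.
  return create_sync ? create_sync(dpy, EGL_SYNC_FENCE_KHR, NULL)
                     : EGL_NO_SYNC_KHR;
}

void WaitAndDestroyFence(EGLDisplay dpy, EGLSyncKHR sync) {
  PFNEGLCLIENTWAITSYNCKHRPROC wait_sync =
      reinterpret_cast<PFNEGLCLIENTWAITSYNCKHRPROC>(
          eglGetProcAddress("eglClientWaitSyncKHR"));
  PFNEGLDESTROYSYNCKHRPROC destroy_sync =
      reinterpret_cast<PFNEGLDESTROYSYNCKHRPROC>(
          eglGetProcAddress("eglDestroySyncKHR"));
  if (sync == EGL_NO_SYNC_KHR || !wait_sync || !destroy_sync)
    return;
  // Block until the client is done with the buffer, then release the sync.
  wait_sync(dpy, sync, 0, EGL_FOREVER_KHR);
  destroy_sync(dpy, sync);
}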
+ |
+void ExynosVideoDecodeAccelerator::Flush() { |
+ DVLOG(3) << "Flush()"; |
+ DCHECK_EQ(child_message_loop_proxy_, base::MessageLoopProxy::current()); |
+ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::FlushTask, base::Unretained(this))); |
+} |
+ |
+void ExynosVideoDecodeAccelerator::Reset() { |
+ DVLOG(3) << "Reset()"; |
+ DCHECK_EQ(child_message_loop_proxy_, base::MessageLoopProxy::current()); |
+ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::ResetTask, base::Unretained(this))); |
+} |
+ |
+void ExynosVideoDecodeAccelerator::Destroy() { |
+ DVLOG(3) << "Destroy()"; |
+ DCHECK_EQ(child_message_loop_proxy_, base::MessageLoopProxy::current()); |
+ |
+ // We're destroying; cancel all callbacks. |
+ client_ptr_factory_.InvalidateWeakPtrs(); |
+ |
+ // If the decoder thread is running, destroy from there. |
+ if (decoder_thread_.IsRunning()) { |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
This /looks/ racy (although is probably not b/c th
sheu
2012/11/01 02:16:08
Good point. I presume that message_loop_proxy() i
|
+ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::DestroyTask, base::Unretained(this))); |
+ |
+ // DestroyTask() will cause the decoder_thread_ to flush all tasks. |
+ decoder_thread_.Stop(); |
+ } else { |
+ // Otherwise, destroy directly. |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
This implies DestroyTask is named unfortunately.
sheu
2012/11/01 02:16:08
Not sure what you mean here. DestroyTask() is a t
|
+ DestroyTask(); |
+ } |
+ |
+ // Nuke the entire site from orbit -- it's the only way to be sure. |
+ if (gsc_fd_ != -1) { |
+ DestroyGscInputBuffers(); |
+ DestroyGscOutputBuffers(); |
+ close(gsc_fd_); |
+ gsc_fd_ = -1; |
+ } |
+ if (mfc_fd_ != -1) { |
+ DestroyMfcInputBuffers(); |
+ DestroyMfcOutputBuffers(); |
+ close(mfc_fd_); |
+ mfc_fd_ = -1; |
+ } |
+ |
+ decoder_state_ = kError; |
+} |
+ |
+// static |
+void ExynosVideoDecodeAccelerator::PreSandboxInitialization() { |
+ DVLOG(3) << "PreSandboxInitialization()"; |
+ errno = 0; |
+ libmali_handle = dlopen(EXYNOS_MALI_DRIVER, RTLD_LAZY | RTLD_LOCAL); |
+ if (libmali_handle == NULL) { |
+ DPLOG(ERROR) << "failed to dlopen() " << EXYNOS_MALI_DRIVER; |
+ } |
+} |
+ |
+// static |
+bool ExynosVideoDecodeAccelerator::PostSandboxInitialization() { |
+ DVLOG(3) << "PostSandboxInitialization()"; |
+ if (libmali_handle == NULL) { |
+ DLOG(ERROR) << "PostSandboxInitialization(): no " << EXYNOS_MALI_DRIVER |
+ " driver handle"; |
+ return false; |
+ } |
+ |
+ errno = 0; |
+ mali_egl_image_get_buffer_ext_phandle = |
+ reinterpret_cast<EGLBoolean(*)(EGLImageKHR, EGLint*, void*)>( |
+ dlsym(libmali_handle, "mali_egl_image_get_buffer_ext_phandle")); |
+ if (mali_egl_image_get_buffer_ext_phandle == NULL) { |
+ DPLOG(ERROR) << "PostSandboxInitialization(): failed to dlsym()" |
+ " mali_egl_image_get_buffer_ext_phandle"; |
+ return false; |
+ } |
+ |
+ errno = 0; |
+ egl_create_sync_khr = |
+ reinterpret_cast<EGLSyncKHR(*)(EGLDisplay, EGLenum, const EGLint*)>( |
+ dlsym(libmali_handle, "eglCreateSyncKHR")); |
+ if (egl_create_sync_khr == NULL) { |
+ DPLOG(ERROR) << "PostSandboxInitialization(): failed to dlsym()" |
+ " eglCreateSyncKHR"; |
+ return false; |
+ } |
+ |
+ errno = 0; |
+ egl_destroy_sync_khr = |
+ reinterpret_cast<EGLBoolean(*)(EGLDisplay, EGLSyncKHR)>( |
+ dlsym(libmali_handle, "eglDestroySyncKHR")); |
+ if (egl_destroy_sync_khr == NULL) { |
+ DPLOG(ERROR) << "PostSandboxInitialization(): failed to dlsym()" |
+ " eglDestroySyncKHR"; |
+ return false; |
+ } |
+ |
+ errno = 0; |
+ egl_client_wait_sync_khr = |
+ reinterpret_cast<EGLint(*)(EGLDisplay, EGLSyncKHR, EGLint, EGLTimeKHR)>( |
+ dlsym(libmali_handle, "eglClientWaitSyncKHR")); |
+ if (egl_client_wait_sync_khr == NULL) { |
+ DPLOG(ERROR) << "PostSandboxInitialization(): failed to dlsym()" |
+ " eglClientWaitSyncKHR"; |
+ return false; |
+ } |
+ |
+ return true; |
+} |
+ |
+void ExynosVideoDecodeAccelerator::DecodeTask( |
+ scoped_ptr<BitstreamBufferRecord> bitstream_record) { |
+ DVLOG(3) << "DecodeTask(): input_id=" << bitstream_record->input_id; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ DCHECK_NE(decoder_state_, kUninitialized); |
+ |
+ if (decoder_state_ == kResetting) { |
+ DVLOG(2) << "DecodeTask(): early out: kResetting state"; |
+ return; |
+ } else if (decoder_state_ == kError) { |
+ DVLOG(2) << "DecodeTask(): early out: kError state"; |
+ return; |
+ } |
+ |
+ decoder_input_queue_.push_front( |
+ linked_ptr<BitstreamBufferRecord>(bitstream_record.release())); |
+ decoder_decode_buffer_tasks_scheduled_ += 1; |
+ DecodeBufferTask(); |
+} |
+ |
+void ExynosVideoDecodeAccelerator::DecodeBufferTask() { |
+ DVLOG(3) << "DecodeBufferTask()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ DCHECK_NE(decoder_state_, kUninitialized); |
+ |
+ decoder_decode_buffer_tasks_scheduled_ -= 1; |
+ |
+ if (decoder_state_ == kResetting) { |
+ DVLOG(2) << "DecodeBufferTask(): early out: kResetting state"; |
+ return; |
+ } else if (decoder_state_ == kError) { |
+ DVLOG(2) << "DecodeBufferTask(): early out: kError state"; |
+ return; |
+ } |
+ |
+ if (decoder_current_bitstream_buffer_ == NULL) { |
+ if (decoder_input_queue_.empty()) { |
+ // We're waiting for a new buffer -- exit without scheduling a new task. |
+ return; |
+ } |
+ // Setup to use the next buffer. |
+ decoder_current_bitstream_buffer_.reset( |
+ decoder_input_queue_.back().release()); |
+ decoder_input_queue_.pop_back(); |
+ DVLOG(3) << "DecodeBufferTask(): reading input_id=" << |
+ decoder_current_bitstream_buffer_->input_id << ", addr=" << |
+ decoder_current_bitstream_buffer_->shm->memory() << ", size=" << |
+ decoder_current_bitstream_buffer_->size; |
+ } |
+ bool decode_result = false; |
+ const void* data = decoder_current_bitstream_buffer_->shm->memory(); |
+ size_t size = decoder_current_bitstream_buffer_->size; |
+ switch (decoder_state_) { |
+ case kInitialized: |
+ case kAfterReset: |
+ decode_result = DecodeBufferInitial(data, size); |
+ break; |
+ case kDecoding: |
+ decode_result = DecodeBufferContinue(data, size); |
+ break; |
+ default: |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(ILLEGAL_STATE); |
+ return; |
+ } |
+ if (decoder_state_ == kError) { |
+ // Failed during decode. |
+ return; |
+ } else if (!decode_result) { |
+ // We might not have failed decode completely, but returned false due to |
+    // insufficient resources, etc.  Retry this buffer later; exit without |
+ // scheduling another task. |
+ return; |
+ } |
+ |
+ // Our current bitstream buffer is done; return it. |
+ int32 input_id = decoder_current_bitstream_buffer_->input_id; |
+ DVLOG(3) << "DecodeBufferTask(): finished input_id=" << input_id; |
+ decoder_current_bitstream_buffer_.reset(NULL); |
+ child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
+ &Client::NotifyEndOfBitstreamBuffer, client_, input_id)); |
+ |
+ // If we're behind on tasks, schedule another one. |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
Is this equivalent to testing whether the input bu
sheu
2012/11/01 02:16:08
Not necessarily; each run through DecodeBufferTask
|
+ if ((size_t)decoder_decode_buffer_tasks_scheduled_ < |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
c-style cast
sheu
2012/11/01 02:16:08
Done.
|
+ decoder_input_queue_.size()) { |
+ decoder_decode_buffer_tasks_scheduled_ += 1; |
+ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::DecodeBufferTask, |
+ base::Unretained(this))); |
+ } |
+} |
+ |
+bool ExynosVideoDecodeAccelerator::DecodeBufferInitial( |
+ const void* data, size_t size) { |
+ DVLOG(3) << "DecodeBufferInitial(): data=" << data << ", size=" << size; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ DCHECK_NE(decoder_state_, kUninitialized); |
+ DCHECK_NE(decoder_state_, kDecoding); |
+ DCHECK(!device_thread_.IsRunning()); |
+ // Initial decode. We haven't been able to get output stream format info yet. |
+ // Get it, and start decoding. |
+ int ret; |
+ |
+ // Copy in and send to HW. |
+ if (!AppendToInputFrame(data, size) || !FlushInputFrame()) |
+ return false; |
+ |
+ // Recycle buffers. |
+ DequeueMfc(); |
+ |
+ // Check and see if we have format info yet. |
+ struct v4l2_format format; |
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_G_FMT, &format); |
+ if (ret != 0) { |
+ if (errno == EINVAL) { |
+ // We will get EINVAL if we haven't seen sufficient stream to decode the |
+ // format. Return true and go to the next buffer. |
+ return true; |
+ } else { |
+ DPLOG(ERROR) << "DecodeBufferInitial(): ioctl() failed: VIDIOC_G_FMT"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ } |
+ |
+ // Run this initialization only on first startup. |
+ if (decoder_state_ == kInitialized) { |
+ DVLOG(3) << "DecodeBufferInitial(): running one-time initialization"; |
+ // Success! Setup our parameters. |
+ DCHECK_EQ(format.fmt.pix_mp.num_planes, 2); |
+ // We don't handle midstream resizes right now. |
+ if (frame_buffer_size_.width() != 0 || frame_buffer_size_.height() != 0) { |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
!IsEmpty()
sheu
2012/11/01 02:16:08
Done.
|
+ if (frame_buffer_size_.width() != (int)format.fmt.pix_mp.width || |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
frame_buffer_size_ != gfx::Size(format.fmt.pix_mp.
sheu
2012/11/01 02:16:08
I'm a bit disinclined to create objects just to co
|
+ frame_buffer_size_.height() != (int)format.fmt.pix_mp.height) { |
+        // We don't handle midstream resizes right now. |
+ NOTIMPLEMENTED(); |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(UNREADABLE_INPUT); |
+ return false; |
+ } |
+ } |
+ frame_buffer_size_.SetSize( |
+ format.fmt.pix_mp.width, format.fmt.pix_mp.height); |
+ mfc_output_buffer_size_[0] = format.fmt.pix_mp.plane_fmt[0].sizeimage; |
+ mfc_output_buffer_size_[1] = format.fmt.pix_mp.plane_fmt[1].sizeimage; |
+ mfc_output_buffer_pixelformat_ = format.fmt.pix_mp.pixelformat; |
+ |
+ // Create our other buffers. |
+ if (!CreateMfcOutputBuffers() || !CreateGscInputBuffers() || |
+ !CreateGscOutputBuffers()) { |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ } |
+ |
+ // StartDevice will raise the error if there is one. |
+ if (!StartDevice()) { |
+ return false; |
+ } |
+ |
+ decoder_state_ = kDecoding; |
+ |
+ // This buffer contained the header that kicks off decoding of the video |
+ // stream. Return false here so it gets recycled into the next |
+ // DecodeBufferContinue() and the start of actual output stream. |
+ return false; |
+} |
+ |
+bool ExynosVideoDecodeAccelerator::DecodeBufferContinue( |
+ const void* data, size_t size) { |
+ DVLOG(3) << "DecodeBufferContinue(): data=" << data << ", size=" << size; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ DCHECK_EQ(decoder_state_, kDecoding); |
+ |
+ // We've already setup our output stream parameters, so just keep on truckin'. |
+ if (!AppendToInputFrame(data, size) || !FlushInputFrame()) |
+ return false; |
+ |
+ return true; |
+} |
+ |
+bool ExynosVideoDecodeAccelerator::AppendToInputFrame( |
+ const void* data, size_t size) { |
+ DVLOG(3) << "AppendToInputFrame()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ // We should have started streaming when we created MFC input buffers. |
+ DCHECK(mfc_input_streamon_); |
+ // Device thread should be running. |
+ DCHECK_EQ(device_thread_.IsRunning(), true); |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
What changed since l.656 where this was false, and
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
DCHECK_EQ(Foo(), true)
is more readable as
DCHECK(
sheu
2012/11/01 02:16:08
Done.
|
+ DCHECK(decoder_state_ == kInitialized || decoder_state_ == kDecoding); |
+ |
+ // Flush if we're too big |
+ if (decoder_current_input_buffer_ != -1) { |
+ MfcInputRecord& input_record = |
+ mfc_input_buffer_map_[decoder_current_input_buffer_]; |
+ if (input_record.bytes_used + size > input_record.length) { |
+ if (!FlushInputFrame()) |
+ return false; |
+ decoder_current_input_buffer_ = -1; |
+ } |
+ } |
+ |
+ // Try to get an available input buffer |
+ if (decoder_current_input_buffer_ == -1) { |
+ if (mfc_free_input_buffers_.empty()) { |
+ // See if we can get more free buffers from HW |
+ DequeueMfc(); |
+ if (mfc_free_input_buffers_.empty()) { |
+ // Nope! |
+ DVLOG(2) << "AppendToInputFrame(): stalled for input buffers"; |
+ return false; |
+ } |
+ } |
+ decoder_current_input_buffer_ = mfc_free_input_buffers_.back(); |
+ mfc_free_input_buffers_.pop_back(); |
+ MfcInputRecord& input_record = |
+ mfc_input_buffer_map_[decoder_current_input_buffer_]; |
+ DCHECK_EQ(input_record.bytes_used, 0); |
+ DCHECK_EQ(input_record.input_id, -1); |
+ DCHECK(decoder_current_bitstream_buffer_ != NULL); |
+ input_record.input_id = decoder_current_bitstream_buffer_->input_id; |
+ } |
+ |
+ // Copy in to the buffer. |
+ MfcInputRecord& input_record = |
+ mfc_input_buffer_map_[decoder_current_input_buffer_]; |
+ if (size > input_record.length - input_record.bytes_used) { |
+ LOG(ERROR) << "AppendToInputFrame(): over-size frame, truncating"; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
where is input_record.length populated?
Is it real
sheu
2012/11/01 02:16:08
The length gets filled in in CreateMfcInputBuffers
|
+ size = input_record.length - input_record.bytes_used; |
+ } |
+ memcpy((char*)input_record.offset + input_record.bytes_used, data, size); |
+ input_record.bytes_used += size; |
+ |
+ return true; |
+} |
+ |
+bool ExynosVideoDecodeAccelerator::FlushInputFrame() { |
+ DVLOG(3) << "FlushInputFrame()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ DCHECK(decoder_state_ == kInitialized || decoder_state_ == kDecoding); |
+ if (decoder_current_input_buffer_ == -1) |
+ return true; |
+ |
+ MfcInputRecord& input_record = |
+ mfc_input_buffer_map_[decoder_current_input_buffer_]; |
+ if (input_record.bytes_used == 0) |
+ return true; |
+ |
+ // Queue it to MSC. |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
typo: MSC
|
+ mfc_input_ready_queue_.push_back(decoder_current_input_buffer_); |
+ decoder_frames_inflight_ += 1; |
+ decoder_current_input_buffer_ = -1; |
+ DVLOG(3) << "FlushInputFrame(): submitting input_id=" << |
+ input_record.input_id; |
+ // Kick the MSC once since there's new available input for it. |
+ EnqueueMfc(); |
+ if (decoder_state_ == kError) |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
l.818-821 is:
return decoder_state_ != kError;
sheu
2012/11/01 02:16:08
Sure.
|
+ return false; |
+ |
+ return true; |
+} |
+ |
+void ExynosVideoDecodeAccelerator::AssignPictureBuffersTask( |
+ scoped_ptr<EGLImageKHRArrayRef> egl_images_ref) { |
+ DVLOG(3) << "AssignPictureBuffersTask()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ DCHECK_NE(decoder_state_, kUninitialized); |
+ |
+ // We run AssignPictureBuffersTask even if we're in kResetting. |
+ if (decoder_state_ == kError) { |
+ DVLOG(2) << "AssignPictureBuffersTask(): early out: kError state"; |
+ return; |
+ } |
+ |
+ DCHECK_EQ(gsc_output_buffer_map_.size(), |
+ (size_t)egl_images_ref->egl_images_count); |
+ for (size_t i = 0; i < gsc_output_buffer_map_.size(); i += 1) { |
+ // We should be blank right now. |
+ GscOutputRecord& output_record = gsc_output_buffer_map_[i]; |
+ DCHECK_EQ(output_record.fd, -1); |
+ DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); |
+ DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
+ DCHECK_EQ(output_record.picture_id, -1); |
+ output_record.fd = egl_images_ref->egl_image_fds[i]; |
+ output_record.egl_image = egl_images_ref->egl_images[i]; |
+ output_record.picture_id = i; |
+ |
+ // Take ownership of the EGLImage and fd. |
+ egl_images_ref->egl_images[i] = EGL_NO_IMAGE_KHR; |
+ egl_images_ref->egl_image_fds[i] = -1; |
+ // And add this buffer to the free list. |
+ gsc_free_output_buffers_.push_front(i); |
+ } |
+ |
+ // StartDevice will raise the error if there is one. |
+ StartDevice(); |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
We can only get here through AssignPictureBuffers
sheu
2012/11/01 02:16:08
Right after initialization, we get calls to Decode
Ami GONE FROM CHROMIUM
2012/11/02 17:57:06
Wow. Please comment both calls to indicate which
|
+} |
+ |
+void ExynosVideoDecodeAccelerator::ServiceDeviceTask() { |
+ DVLOG(3) << "ServiceDeviceTask()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ DCHECK_NE(decoder_state_, kUninitialized); |
+ DCHECK_NE(decoder_state_, kInitialized); |
+ DCHECK_NE(decoder_state_, kAfterReset); |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
How can you guarantee this?
sheu
2012/11/01 02:16:08
We only get put into kAfterReset after a ResetDone
|
+ |
+ if (decoder_state_ == kResetting) { |
+ DVLOG(2) << "ServiceDeviceTask(): early out: kResetting state"; |
+ return; |
+ } else if (decoder_state_ == kError) { |
+ DVLOG(2) << "ServiceDeviceTask(): early out: kError state"; |
+ return; |
+ } |
+ |
+ DequeueMfc(); |
+ DequeueGsc(); |
+ EnqueueMfc(); |
+ EnqueueGsc(); |
+ |
+ DVLOG(1) << "ServiceDeviceTask(): buffer counts: DEC[" << |
+ decoder_input_queue_.size() << "->" << |
+ mfc_input_ready_queue_.size() << "] => MSC[" << |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
typo: MSC
(just do a global search for this, would
sheu
2012/11/01 02:16:08
:-)
|
+ mfc_free_input_buffers_.size() << "/" << |
+ mfc_input_buffer_count_ << "->" << |
+ mfc_free_output_buffers_.size() << "/" << |
+ mfc_output_buffer_count_ << "] => " << |
+ mfc_output_gsc_input_queue_.size() << " => GSC[" << |
+ gsc_free_input_buffers_.size() << "/" << |
+ gsc_input_buffer_count_ << "->" << |
+ gsc_free_output_buffers_.size() << "/" << |
+ gsc_output_buffer_count_ << "] => VDA[" << |
+ decoder_frames_at_client_ << "]"; |
+} |
+ |
+void ExynosVideoDecodeAccelerator::EnqueueMfc() { |
+ DVLOG(3) << "EnqueueMfc()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ DCHECK_NE(decoder_state_, kUninitialized); |
+ |
+ int ret; |
+ struct v4l2_buffer qbuf; |
+ struct v4l2_plane qbuf_planes[2]; |
+ |
+ // Drain the pipe of completed decode buffers. |
+ while (!mfc_input_ready_queue_.empty()) { |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
how can this queue ever contain more than a single
sheu
2012/11/01 02:16:08
If the decoder is backed up, we can have the clien
|
+ // Enqueue the MFC input (VIDEO_OUTPUT) buffer. |
+ int buffer = mfc_input_ready_queue_.back(); |
+ MfcInputRecord& input_record = mfc_input_buffer_map_[buffer]; |
+ DCHECK_EQ(input_record.at_device, false); |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
drop the _EQ
sheu
2012/11/01 02:16:08
Did some regexing on this.
|
+ memset(&qbuf, 0, sizeof(qbuf)); |
+ memset(qbuf_planes, 0, sizeof(qbuf_planes)); |
+ qbuf.index = buffer; |
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
I'm going to defer to posciak@ for reviewing the v
|
+ qbuf.timestamp.tv_sec = input_record.input_id; |
+ qbuf.memory = V4L2_MEMORY_MMAP; |
+ qbuf.m.planes = qbuf_planes; |
+ qbuf.m.planes[0].bytesused = input_record.bytes_used; |
+ qbuf.length = 1; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_QBUF, &qbuf); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "EnqueueMfc(): ioctl() failed: VIDIOC_QBUF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ input_record.at_device = true; |
+ mfc_input_ready_queue_.pop_back(); |
+ DVLOG(3) << "EnqueueMfc(): enqueued input_id=" << input_record.input_id; |
+ if (!mfc_input_streamon_) { |
+ __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_STREAMON, &type); |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
Shouldn't this be done much earlier, during initia
sheu
2012/11/01 02:16:08
Nope. V4L2 requires that you have at least one bu
|
+ if (ret != 0) { |
+ DPLOG(ERROR) << "EnqueueMfc(): ioctl() failed: VIDIOC_STREAMON"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ mfc_input_streamon_ = true; |
+ } |
+ } |
+ |
+ // Enqueue all the MFC output (VIDEO_CAPTURE) buffers we can. |
+ while (!mfc_free_output_buffers_.empty()) { |
+ int buffer = mfc_free_output_buffers_.back(); |
+ MfcOutputRecord& output_record = mfc_output_buffer_map_[buffer]; |
+ DCHECK_EQ(output_record.at_device, false); |
+ DCHECK_EQ(output_record.input_id, -1); |
+ memset(&qbuf, 0, sizeof(qbuf)); |
+ memset(qbuf_planes, 0, sizeof(qbuf_planes)); |
+ qbuf.index = buffer; |
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ qbuf.memory = V4L2_MEMORY_MMAP; |
+ qbuf.m.planes = qbuf_planes; |
+ qbuf.length = 2; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_QBUF, &qbuf); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "EnqueueMfc(): ioctl() failed: VIDIOC_QBUF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ output_record.at_device = true; |
+ mfc_free_output_buffers_.pop_back(); |
+ if (!mfc_output_streamon_) { |
+ __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_STREAMON, &type); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "EnqueueMfc(): ioctl() failed: VIDIOC_STREAMON"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ mfc_output_streamon_ = true; |
+ } |
+ } |
+} |
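To illustrate sheu's point above: the driver wants at least one buffer queued on a queue before VIDIOC_STREAMON will succeed on it, which is why streaming is turned on lazily at the first QBUF instead of in Initialize(). A standalone sketch of that ordering (fd, index and bytes_used are placeholders; VIDIOC_REQBUFS/mmap setup is assumed to have happened already):

#include <cstring>
#include <linux/videodev2.h>
#include <sys/ioctl.h>

bool QueueFirstInputAndStreamOn(int fd, int index, unsigned int bytes_used) {
  struct v4l2_plane planes[1];
  struct v4l2_buffer qbuf;
  memset(&qbuf, 0, sizeof(qbuf));
  memset(planes, 0, sizeof(planes));
  qbuf.index = index;
  qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  qbuf.memory = V4L2_MEMORY_MMAP;
  qbuf.m.planes = planes;
  qbuf.m.planes[0].bytesused = bytes_used;
  qbuf.length = 1;
  if (ioctl(fd, VIDIOC_QBUF, &qbuf) != 0)  // Queue a buffer first...
    return false;
  __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  return ioctl(fd, VIDIOC_STREAMON, &type) == 0;  // ...then start streaming.
}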
+ |
+void ExynosVideoDecodeAccelerator::DequeueMfc() { |
+ DVLOG(3) << "DequeueMfc()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ DCHECK_NE(decoder_state_, kUninitialized); |
+ DCHECK_NE(decoder_state_, kInitialized); |
+ DCHECK_NE(decoder_state_, kAfterReset); |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
I don't think you can guarantee this
sheu
2012/11/01 02:16:08
Ah, turns out you are correct here, since we can g
|
+ |
+ int ret; |
+ struct v4l2_buffer dqbuf; |
+ struct v4l2_plane planes[2]; |
+ |
+ // Dequeue completed MFC input (VIDEO_OUTPUT) buffers, and recycle to the free |
+ // list. Note that if we ever run completely dry of input buffers on the MFC |
+ // device, epoll() will return EPOLLERR and our DeviceTask() will exit early. |
+ // Work around this by never _completely_ draining the MFC input queue. |
+ const int decoder_buffers = mfc_input_ready_queue_.size() + |
+ (decoder_current_input_buffer_ != -1 ? 1 : 0); |
+ while ((size_t)mfc_input_buffer_count_ > |
+ decoder_buffers + mfc_free_input_buffers_.size() + 1) { |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
I don't understand l.993-1000. Can you explain?
sheu
2012/11/01 02:16:08
So.
We don't want to completely drain MFC. So we
|
+ memset(&dqbuf, 0, sizeof(dqbuf)); |
+ dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ dqbuf.memory = V4L2_MEMORY_MMAP; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_DQBUF, &dqbuf); |
+ if (ret != 0) { |
+ if (errno == EAGAIN) { |
+ // Done all that we could. |
+ break; |
+ } else if (errno == EINVAL) { |
+ // We're not streaming this queue; skip. |
+ DVLOG(2) << "DequeueMfc(): VIDEO_OUTPUT not streaming, skipping"; |
+ break; |
+ } else { |
+ DPLOG(ERROR) << "DequeueMfc(): ioctl() failed: VIDIOC_DQBUF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ } |
+ MfcInputRecord& input_record = mfc_input_buffer_map_[dqbuf.index]; |
+ DCHECK_EQ(input_record.at_device, true); |
+ input_record.at_device = false; |
+ input_record.bytes_used = 0; |
+ input_record.input_id = -1; |
+ mfc_free_input_buffers_.push_back(dqbuf.index); |
+ } |
+ |
+ // Dequeue completed MFC output (VIDEO_CAPTURE) buffers, and queue to the |
+ // completed queue. |
+ memset(&dqbuf, 0, sizeof(dqbuf)); |
+ memset(planes, 0, sizeof(planes)); |
+ dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ dqbuf.memory = V4L2_MEMORY_MMAP; |
+ dqbuf.m.planes = planes; |
+ dqbuf.length = 2; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_DQBUF, &dqbuf); |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
move the ioctl into the while condition to avoid h
sheu
2012/11/01 02:16:08
Done.
|
+ while (ret == 0) { |
+ MfcOutputRecord& output_record = mfc_output_buffer_map_[dqbuf.index]; |
+ DCHECK_EQ(output_record.at_device, true); |
+ output_record.at_device = false; |
+ output_record.input_id = dqbuf.timestamp.tv_sec; |
+ output_record.bytes_used[0] = dqbuf.m.planes[0].bytesused; |
+ output_record.bytes_used[1] = dqbuf.m.planes[1].bytesused; |
+ DVLOG(3) << "DequeueMfc(): dequeued input_id=" << dqbuf.timestamp.tv_sec; |
+ mfc_output_gsc_input_queue_.push_front(dqbuf.index); |
+ memset(&dqbuf, 0, sizeof(dqbuf)); |
+ memset(planes, 0, sizeof(planes)); |
+ dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ dqbuf.memory = V4L2_MEMORY_MMAP; |
+ dqbuf.m.planes = planes; |
+ dqbuf.length = 2; |
+ errno = 0; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
unfortch that l.1048-1054 is a copy of l.1031-1037
|
+ ret = ioctl(mfc_fd_, VIDIOC_DQBUF, &dqbuf); |
+ } |
+ if (errno == EINVAL) { |
+ // We're not streaming this queue; skip. |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
what does this mean? In what scenario can this ha
sheu
2012/11/01 02:16:08
We could be streaming the OUTPUT but not the CAPTU
|
+ DVLOG(2) << "DequeueMfc(): VIDEO_CAPTURE not streaming, skipping"; |
+ } else if (errno != EAGAIN) { |
+ DPLOG(ERROR) << "DequeueMfc(): ioctl() failed: VIDIOC_DQBUF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+} |
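A small sketch of the bookkeeping behind the dequeue loop bound questioned above: buffers still queued at the device equal the total minus those held by the decoder and those on the free list, and the loop stops once only one would remain, so epoll() on the device fd never sees a completely drained queue. Names are illustrative, not the patch's:

#include <cstddef>

// True while another completed input buffer can be dequeued without leaving
// the device's queue completely empty.
bool MayDequeueAnotherInput(size_t total_input_buffers,
                            size_t held_by_decoder,
                            size_t on_free_list) {
  size_t still_at_device =
      total_input_buffers - (held_by_decoder + on_free_list);
  return still_at_device > 1;  // Always leave at least one buffer queued.
}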
+ |
+void ExynosVideoDecodeAccelerator::EnqueueGsc() { |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
Made it down to here and ran out of steam.
sheu
2012/11/01 02:16:08
Halfway there!
|
+ DVLOG(3) << "EnqueueGsc()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ DCHECK_NE(decoder_state_, kUninitialized); |
+ DCHECK_NE(decoder_state_, kInitialized); |
+ DCHECK_NE(decoder_state_, kAfterReset); |
+ |
+ int ret; |
+ struct v4l2_buffer qbuf; |
+ struct v4l2_plane qbuf_planes[2]; |
+ |
+ // Drain the pipe of completed MFC output buffers. |
+ DCHECK_EQ(gsc_output_streamon_, true); |
+ while (!mfc_output_gsc_input_queue_.empty()) { |
+ if (gsc_free_input_buffers_.empty()) |
+ break; |
+ // Bug workaround: GSC is liable to race conditions if more than one |
+ // buffer is simultaneously queued. |
+ if (gsc_output_buffer_queued_count_ > 0) |
+ break; |
+ if (gsc_output_buffer_prepared_count_ == 0) { |
+ // Enqueue a GSC output (VIDEO_CAPTURE) buffer for the incoming GSC input |
+ // buffer. |
+ if (gsc_free_output_buffers_.empty()) |
+ break; |
+ int buffer = gsc_free_output_buffers_.back(); |
+ GscOutputRecord& output_record = gsc_output_buffer_map_[buffer]; |
+ DCHECK_EQ(output_record.at_device, false); |
+ DCHECK_EQ(output_record.at_client, false); |
+ if (output_record.egl_sync != EGL_NO_SYNC_KHR) { |
+ // If we have to wait for completion, wait. Note that |
+ // gsc_free_output_buffers_ is a FIFO queue. |
+ (*egl_client_wait_sync_khr)(egl_display_, output_record.egl_sync, 0, |
+ EGL_FOREVER_KHR); |
+ (*egl_destroy_sync_khr)(egl_display_, output_record.egl_sync); |
+ output_record.egl_sync = EGL_NO_SYNC_KHR; |
+ } |
+ memset(&qbuf, 0, sizeof(qbuf)); |
+ memset(qbuf_planes, 0, sizeof(qbuf_planes)); |
+ qbuf.index = buffer; |
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ qbuf.memory = V4L2_MEMORY_DMABUF; |
+ qbuf.m.planes = qbuf_planes; |
+ qbuf.m.planes[0].m.fd = output_record.fd; |
+ qbuf.length = 1; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_QBUF, &qbuf); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "EnqueueGsc(): ioctl() failed: VIDIOC_QBUF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ output_record.at_device = true; |
+ gsc_free_output_buffers_.pop_back(); |
+ gsc_output_buffer_prepared_count_ += 1; |
+ } |
+ // Now enqueue the GSC input (VIDEO_OUTPUT) buffer for the complete MFC |
+ // output buffer. We defer requeuing the MFC output buffer to its free |
+ // list, as the GSC input will be using its data. |
+ int mfc_buffer, gsc_buffer; |
+ mfc_buffer = mfc_output_gsc_input_queue_.back(); |
+ gsc_buffer = gsc_free_input_buffers_.back(); |
+ MfcOutputRecord& output_record = mfc_output_buffer_map_[mfc_buffer]; |
+ DCHECK_EQ(output_record.at_device, false); |
+ GscInputRecord& input_record = gsc_input_buffer_map_[gsc_buffer]; |
+ DCHECK_EQ(input_record.at_device, false); |
+ DCHECK_EQ(input_record.mfc_output, -1); |
+ memset(&qbuf, 0, sizeof(qbuf)); |
+ memset(qbuf_planes, 0, sizeof(qbuf_planes)); |
+ qbuf.index = gsc_buffer; |
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ qbuf.timestamp.tv_sec = output_record.input_id; |
+ qbuf.memory = V4L2_MEMORY_USERPTR; |
+ qbuf.m.planes = qbuf_planes; |
+ qbuf.m.planes[0].bytesused = output_record.bytes_used[0]; |
+ qbuf.m.planes[0].length = mfc_output_buffer_size_[0]; |
+ qbuf.m.planes[0].m.userptr = (unsigned long)output_record.offset[0]; |
+ qbuf.m.planes[1].bytesused = output_record.bytes_used[1]; |
+ qbuf.m.planes[1].length = mfc_output_buffer_size_[1]; |
+ qbuf.m.planes[1].m.userptr = (unsigned long)output_record.offset[1]; |
+ qbuf.length = 2; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_QBUF, &qbuf); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "EnqueueGsc(): ioctl() failed: VIDIOC_QBUF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ input_record.at_device = true; |
+ input_record.mfc_output = mfc_buffer; |
+ output_record.bytes_used[0] = 0; |
+ output_record.bytes_used[1] = 0; |
+ mfc_output_gsc_input_queue_.pop_back(); |
+ gsc_free_input_buffers_.pop_back(); |
+ gsc_output_buffer_prepared_count_ -= 1; |
+ gsc_output_buffer_queued_count_ += 1; |
+ DVLOG(3) << "EnqueueGsc(): enqueued input_id=" << output_record.input_id; |
+ if (!gsc_input_streamon_) { |
+ __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_STREAMON, &type); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "EnqueueGsc(): ioctl() failed: VIDIOC_STREAMON"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ gsc_input_streamon_ = true; |
+ } |
+ } |
+} |
+ |
+void ExynosVideoDecodeAccelerator::DequeueGsc() { |
+ DVLOG(3) << "DequeueGsc()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ DCHECK_NE(decoder_state_, kUninitialized); |
+ DCHECK_NE(decoder_state_, kInitialized); |
+ DCHECK_NE(decoder_state_, kAfterReset); |
+ |
+ int ret; |
+ struct v4l2_buffer dqbuf; |
+ |
+ // Dequeue completed GSC input (VIDEO_OUTPUT) buffers, and recycle to the free |
+ // list. Also recycle the corresponding MFC output buffers at this time. |
+ // Note that if we ever run completely dry of input buffers on the GSC device, |
+ // epoll() will return EPOLLERR and our DeviceTask() will exit early. Work |
+ // around this by never _completely_ draining the GSC input queue. |
+ while ((size_t)gsc_input_buffer_count_ > gsc_free_input_buffers_.size() + 1) { |
+ memset(&dqbuf, 0, sizeof(dqbuf)); |
+ dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ dqbuf.memory = V4L2_MEMORY_DMABUF; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_DQBUF, &dqbuf); |
+ if (ret != 0) { |
+ if (errno == EAGAIN) { |
+ // Done all that we could. |
+ break; |
+ } else if (errno == EINVAL) { |
+ // We're not streaming this queue; skip. |
+ DVLOG(2) << "DequeueGsc(): VIDEO_OUTPUT not streaming, skipping"; |
+ break; |
+ } else { |
+ DPLOG(ERROR) << "DequeueGsc(): ioctl() failed: VIDIOC_DQBUF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ } |
+ GscInputRecord& input_record = gsc_input_buffer_map_[dqbuf.index]; |
+ DCHECK_EQ(input_record.at_device, true); |
+ input_record.at_device = false; |
+ mfc_output_buffer_map_[input_record.mfc_output].input_id = -1; |
+ mfc_free_output_buffers_.push_back(input_record.mfc_output); |
+ input_record.mfc_output = -1; |
+ gsc_free_input_buffers_.push_back(dqbuf.index); |
+ } |
+ |
+ // Dequeue completed GSC output (VIDEO_CAPTURE) buffers, and send them off to |
+ // the VDA. Don't recycle to its free list yet -- we can't do that until |
+ // ReusePictureBuffer() returns it to us. |
+ memset(&dqbuf, 0, sizeof(dqbuf)); |
+ dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ dqbuf.memory = V4L2_MEMORY_DMABUF; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_DQBUF, &dqbuf); |
+ while (ret == 0) { |
+ GscOutputRecord& output_record = gsc_output_buffer_map_[dqbuf.index]; |
+ DCHECK_EQ(output_record.at_device, true); |
+ DCHECK_EQ(output_record.at_client, false); |
+ DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
+ output_record.at_device = false; |
+ output_record.at_client = true; |
+ gsc_output_buffer_queued_count_ -= 1; |
+ child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
+ &Client::PictureReady, client_, media::Picture( |
+ output_record.picture_id, dqbuf.timestamp.tv_sec))); |
+ decoder_frames_inflight_ -= 1; |
+ decoder_frames_at_client_ += 1; |
+ DVLOG(1) << "DequeueGsc(): dequeued input_id=" << dqbuf.timestamp.tv_sec << |
+ " as picture_id=" << output_record.picture_id; |
+ if (decoder_frames_inflight_ == 0 && decoder_flush_notify_requested_) { |
+ // We were asked for a flush notification, so let's do it. |
+ child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
+ &Client::NotifyFlushDone, client_)); |
+ } |
+ memset(&dqbuf, 0, sizeof(dqbuf)); |
+ dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ dqbuf.memory = V4L2_MEMORY_DMABUF; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_DQBUF, &dqbuf); |
+ } |
+ if (errno == EINVAL) { |
+ // We're not streaming this queue; skip. |
+ DVLOG(2) << "DequeueGsc(): VIDEO_CAPTURE not streaming, skipping"; |
+ } else if (errno != EAGAIN) { |
+ DPLOG(ERROR) << "DequeueGsc(): ioctl() failed: VIDIOC_DQBUF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+} |
+ |
+void ExynosVideoDecodeAccelerator::ReusePictureBufferTask( |
+ int32 picture_buffer_id, scoped_ptr<EGLSyncKHRRef> egl_sync_ref) { |
+ DVLOG(3) << "ReusePictureBufferTask(): picture_buffer_id=" << |
+ picture_buffer_id; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ |
+ // We run ReusePictureBufferTask even if we're in kResetting. |
+ if (decoder_state_ == kError) { |
+ DVLOG(2) << "ReusePictureBufferTask(): early out: kError state"; |
+ return; |
+ } |
+ |
+ size_t index; |
+ for (index = 0; index < gsc_output_buffer_map_.size(); index += 1) |
+ if (gsc_output_buffer_map_[index].picture_id == picture_buffer_id) |
+ break; |
+ |
+ if (index >= gsc_output_buffer_map_.size()) { |
+ DLOG(ERROR) << "ReusePictureBufferTask(): picture_buffer_id not found"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(INVALID_ARGUMENT); |
+ return; |
+ } |
+ |
+ GscOutputRecord& output_record = gsc_output_buffer_map_[index]; |
+ DCHECK_EQ(output_record.at_device, false); |
+ DCHECK_EQ(output_record.at_client, true); |
+ output_record.at_client = false; |
+ output_record.egl_sync = egl_sync_ref->egl_sync; |
+ gsc_free_output_buffers_.push_front(index); |
+ decoder_frames_at_client_ -= 1; |
+ // Take ownership of the EGLSync. |
+ egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR; |
+ // We got a buffer back, so kick the GSC. |
+ EnqueueGsc(); |
+} |
+ |
+void ExynosVideoDecodeAccelerator::FlushTask() { |
+ DVLOG(3) << "FlushTask()"; |
+ // Flush the currently-building frame. |
+ FlushInputFrame(); |
+ |
+ if (decoder_frames_inflight_ == 0) { |
+    // If we don't have anything actually queued, we can notify immediately. |
+ child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
+ &Client::NotifyFlushDone, client_)); |
+ } else { |
+ // We'll flag that we want a flush-finished notification, and just return. |
+ decoder_flush_notify_requested_ = true; |
+ } |
+} |
+ |
+void ExynosVideoDecodeAccelerator::ResetTask() { |
+ DVLOG(3) << "ResetTask()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ // We stop streaming, but we _don't_ destroy our buffers. |
+ if (!StopDevice()) |
+ return; |
+ |
+ if (decoder_current_bitstream_buffer_ != NULL) { |
+ int input_id; |
+ input_id = decoder_current_bitstream_buffer_->input_id; |
+ decoder_current_bitstream_buffer_.reset(NULL); |
+ child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
+ &Client::NotifyEndOfBitstreamBuffer, client_, input_id)); |
+ } |
+ while (!decoder_input_queue_.empty()) { |
+ int input_id; |
+ scoped_ptr<BitstreamBufferRecord> |
+ bitstream_record(decoder_input_queue_.back().release()); |
+ decoder_input_queue_.pop_back(); |
+ input_id = bitstream_record->input_id; |
+ child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
+ &Client::NotifyEndOfBitstreamBuffer, client_, input_id)); |
+ } |
+ |
+ decoder_current_input_buffer_ = -1; |
+ decoder_decode_buffer_tasks_scheduled_ = 0; |
+ decoder_frames_inflight_ = 0; |
+ decoder_flush_notify_requested_ = false; |
+ |
+ // Mark that we're resetting, then enqueue a ResetDoneTask(). All intervening |
+ // jobs will early-out in the kResetting state. |
+ decoder_state_ = kResetting; |
+ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::ResetDoneTask, base::Unretained(this))); |
+} |
+ |
+void ExynosVideoDecodeAccelerator::ResetDoneTask() { |
+ DVLOG(3) << "ResetDoneTask()"; |
+ // Jobs drained, we're finished resetting. |
+ decoder_state_ = kAfterReset; |
+ child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
+ &Client::NotifyResetDone, client_)); |
+} |
+ |
+void ExynosVideoDecodeAccelerator::DestroyTask() { |
+ DVLOG(3) << "DestroyTask()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ |
+ // Stop streaming and the device_thread_. |
+ StopDevice(); |
+ |
+ decoder_current_bitstream_buffer_.reset(NULL); |
+ decoder_current_input_buffer_ = -1; |
+ decoder_decode_buffer_tasks_scheduled_ = 0; |
+ decoder_frames_inflight_ = 0; |
+ decoder_frames_at_client_ = 0; |
+ decoder_flush_notify_requested_ = false; |
+ decoder_input_queue_.clear(); |
+ |
+ // Set our state to kError. This will cause all subsequent tasks to |
+ // early-exit. |
+ decoder_state_ = kError; |
+} |
+ |
+void ExynosVideoDecodeAccelerator::CleanupTask() { |
+ DVLOG(3) << "CleanupTask()"; |
+ DCHECK_EQ(child_message_loop_proxy_, base::MessageLoopProxy::current()); |
+ decoder_state_ = kError; |
Ami GONE FROM CHROMIUM
2012/10/31 01:06:50
what's this? more decoder_state_ writing on the c
sheu
2012/11/01 02:16:08
Removed, see .h. This one's an actual bug :-)
|
+} |
+ |
+bool ExynosVideoDecodeAccelerator::StartDevice() { |
+ DVLOG(3) << "StartDevice()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ int ret; |
+ |
+ // Early-out if we're already running. |
+ if (device_thread_.IsRunning()) { |
+ DVLOG(2) << "StartDevice(): early out: device already running"; |
+ return true; |
+ } |
+ |
+  // At least one of the OUTPUT or CAPTURE queues for each of MFC and GSC must |
+  // be in STREAMON state for epoll() not to return EPOLLERR. So: |
+  // * for MFC, we'll start the CAPTURE queue |
+  // * for GSC, we'll start the CAPTURE queue |
+  // Unfortunately, STREAMON requires that buffers already be queued, so we |
+  // need free output buffers available to queue before we can start. |
+ if ((!mfc_output_streamon_ && mfc_free_output_buffers_.empty()) || |
+ gsc_free_output_buffers_.empty()) { |
+ DVLOG(2) << "StartDevice(): early out: output buffers unavailable"; |
+ return true; |
+ } |
+ |
+ struct v4l2_buffer qbuf; |
+ struct v4l2_plane qbuf_planes[2]; |
+ |
+ // The MFC output queue may already have been started when we started |
+ // enqueuing MFC input buffers. |
+ if (!mfc_output_streamon_) { |
+ // Queue and start the MFC CAPTURE queue |
+ int buffer; |
+ __u32 type; |
+ memset(&qbuf, 0, sizeof(qbuf)); |
+ memset(qbuf_planes, 0, sizeof(qbuf_planes)); |
+ buffer = mfc_free_output_buffers_.back(); |
+ MfcOutputRecord& output_record = mfc_output_buffer_map_[buffer]; |
+ DCHECK_EQ(output_record.at_device, false); |
+ DCHECK_EQ(output_record.input_id, -1); |
+ qbuf.index = buffer; |
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ qbuf.memory = V4L2_MEMORY_MMAP; |
+ qbuf.m.planes = qbuf_planes; |
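+    // Two planes: the luma (Y) plane and the interleaved chroma (CbCr) |
+    // plane of the tiled NV12 format set up at Initialize() time. |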
+ qbuf.length = 2; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_QBUF, &qbuf); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "StartDevice(): ioctl() failed: VIDIOC_QBUF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ output_record.at_device = true; |
+ mfc_free_output_buffers_.pop_back(); |
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_STREAMON, &type); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "StartDevice(): ioctl() failed: VIDIOC_STREAMON"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ mfc_output_streamon_ = true; |
+ } |
+ |
+ // GSC output should not have been started before we received output buffers. |
+ DCHECK_EQ(gsc_output_streamon_, false); |
+ { |
+ // Queue and start the GSC CAPTURE queue |
+ int buffer; |
+ __u32 type; |
+ memset(&qbuf, 0, sizeof(qbuf)); |
+ memset(qbuf_planes, 0, sizeof(qbuf_planes)); |
+ buffer = gsc_free_output_buffers_.back(); |
+ GscOutputRecord& output_record = gsc_output_buffer_map_[buffer]; |
+ DCHECK_EQ(output_record.at_device, false); |
+ DCHECK_EQ(output_record.at_client, false); |
+ qbuf.index = buffer; |
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ qbuf.memory = V4L2_MEMORY_DMABUF; |
+ qbuf.m.planes = qbuf_planes; |
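+    // The CAPTURE plane is the DMABUF backing this picture's EGLImage |
+    // (recorded in the output record when picture buffers were assigned). |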
+ qbuf.m.planes[0].m.fd = gsc_output_buffer_map_[buffer].fd; |
+ qbuf.length = 1; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_QBUF, &qbuf); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "StartDevice(): ioctl() failed: VIDIOC_QBUF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ output_record.at_device = true; |
+ gsc_free_output_buffers_.pop_back(); |
+ gsc_output_buffer_prepared_count_ += 1; |
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_STREAMON, &type); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "StartDevice(): ioctl() failed: VIDIOC_STREAMON"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ gsc_output_streamon_ = true; |
+ |
+ if (!device_thread_.Start()) { |
+ DLOG(ERROR) << "StartDevice(): Device thread failed to start"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ device_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::DeviceTask, base::Unretained(this))); |
+ } |
+ |
+ return true; |
+} |
+ |
+bool ExynosVideoDecodeAccelerator::StopDevice() { |
+ DVLOG(3) << "StopDevice()"; |
+ DCHECK_EQ(decoder_thread_.message_loop(), MessageLoop::current()); |
+ int ret; |
+ |
+ // Stop streaming. |
+ if (mfc_input_streamon_) { |
+ __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_STREAMOFF, &type); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "StopDevice(): ioctl() failed: VIDIOC_STREAMOFF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ } |
+ mfc_input_streamon_ = false; |
+ if (mfc_output_streamon_) { |
+ __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_STREAMOFF, &type); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "StopDevice(): ioctl() failed: VIDIOC_STREAMOFF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ } |
+ mfc_output_streamon_ = false; |
+ if (gsc_input_streamon_) { |
+ __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_STREAMOFF, &type); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "StopDevice(): ioctl() failed: VIDIOC_STREAMOFF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ } |
+ gsc_input_streamon_ = false; |
+ if (gsc_output_streamon_) { |
+ __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_STREAMOFF, &type); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "StopDevice(): ioctl() failed: VIDIOC_STREAMOFF"; |
+ decoder_state_ = kError; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ } |
+ gsc_output_streamon_ = false; |
+ |
+ // Stopping streaming should cause DeviceTask() to exit. Join the thread. |
+ device_thread_.Stop(); |
+ |
+ // Reset all our accounting info. |
+ mfc_input_ready_queue_.clear(); |
+ mfc_free_input_buffers_.clear(); |
+ DCHECK_EQ((size_t)mfc_input_buffer_count_, mfc_input_buffer_map_.size()); |
+ for (size_t i = 0; i < mfc_input_buffer_map_.size(); i += 1) { |
+ mfc_free_input_buffers_.push_back(i); |
+ mfc_input_buffer_map_[i].at_device = false; |
+ mfc_input_buffer_map_[i].bytes_used = 0; |
+ mfc_input_buffer_map_[i].input_id = -1; |
+ } |
+ mfc_free_output_buffers_.clear(); |
+ DCHECK_EQ((size_t)mfc_output_buffer_count_, mfc_output_buffer_map_.size()); |
+ for (size_t i = 0; i < mfc_output_buffer_map_.size(); i += 1) { |
+ mfc_free_output_buffers_.push_back(i); |
+ mfc_output_buffer_map_[i].at_device = false; |
+ mfc_output_buffer_map_[i].input_id = -1; |
+ } |
+ mfc_output_gsc_input_queue_.clear(); |
+ gsc_free_input_buffers_.clear(); |
+ DCHECK_EQ((size_t)gsc_input_buffer_count_, gsc_input_buffer_map_.size()); |
+ for (size_t i = 0; i < gsc_input_buffer_map_.size(); i += 1) { |
+ gsc_free_input_buffers_.push_back(i); |
+ gsc_input_buffer_map_[i].at_device = false; |
+ gsc_input_buffer_map_[i].mfc_output = -1; |
+ } |
+ gsc_free_output_buffers_.clear(); |
+ DCHECK_EQ((size_t)gsc_output_buffer_count_, gsc_output_buffer_map_.size()); |
+ for (size_t i = 0; i < gsc_output_buffer_map_.size(); i += 1) { |
+ // Only mark those free that aren't being held by the VDA. |
+ if (!gsc_output_buffer_map_[i].at_client) { |
+ gsc_free_output_buffers_.push_back(i); |
+ gsc_output_buffer_map_[i].at_device = false; |
+ } |
+ if (gsc_output_buffer_map_[i].egl_sync != EGL_NO_SYNC_KHR) { |
+ (*egl_destroy_sync_khr)(egl_display_, gsc_output_buffer_map_[i].egl_sync); |
+ gsc_output_buffer_map_[i].egl_sync = EGL_NO_SYNC_KHR; |
+ } |
+ } |
+ gsc_output_buffer_prepared_count_ = 0; |
+ gsc_output_buffer_queued_count_ = 0; |
+ |
+ DVLOG(3) << "StopDevice(): device stopped"; |
+ return true; |
+} |
+ |
+void ExynosVideoDecodeAccelerator::DeviceTask() { |
+ DVLOG(3) << "DeviceTask()"; |
+ DCHECK_EQ(device_thread_.message_loop(), MessageLoop::current()); |
+ // This routine just polls on the V4L2 devices, and notifies the |
+ // decoder_thread_ when processing needs to occur. The main loop will |
+ // terminate when we return an EPOLLERR to epoll() on the device file |
+ // descriptors, which should occur when the devices are sent VIDIOC_STREAMOFF |
+ // on their queues. |
+ |
+ int epoll_fd = -1; |
+ file_util::ScopedFD epoll_fd_closer(&epoll_fd); |
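+  // epoll_fd_closer closes epoll_fd on any early return below. |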
+ int ret; |
+ |
+ struct epoll_event event; |
+ errno = 0; |
+ epoll_fd = epoll_create(2); |
+ if (epoll_fd == -1) { |
+ DPLOG(ERROR) << "DeviceTask(): epoll() failed"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ event.events = EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLET; |
+ event.data.fd = mfc_fd_; |
+ errno = 0; |
+ ret = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, mfc_fd_, &event); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "DeviceTask(): epoll_ctl() failed"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ event.events = EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLET; |
+ event.data.fd = gsc_fd_; |
+ errno = 0; |
+ ret = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, gsc_fd_, &event); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "DeviceTask(): epoll_ctl() failed"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+ for (;;) { |
+ // We epoll() and wait for more interesting things to happen. |
+ // If we get a VIDIOC_STREAMOFF to the device, epoll() will return EPOLLERR, |
+ // and we know to exit the loop that way. |
+ DVLOG(3) << "DeviceTask(): epoll()"; |
+ struct epoll_event event; |
+ do { |
+ errno = 0; |
+ ret = epoll_wait(epoll_fd, &event, 1, -1); |
+ } while (ret < 1 && errno == EINTR); |
+ if (ret == -1) { |
+ DPLOG(ERROR) << "DeviceTask(): epoll_wait() failed"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ if (event.data.fd == mfc_fd_) { |
+ if ((event.events & EPOLLERR) != 0) { |
+ DVLOG(2) << "DeviceTask(): epoll() returned EPOLERR for mfc_fd_"; |
+ // Not necessarily an error. |
+ return; |
+ } |
+ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::ServiceDeviceTask, |
+ base::Unretained(this))); |
+ // If we're behind on tasks, schedule another one. |
+ size_t buffers_pending = decoder_input_queue_.size(); |
+ if (decoder_current_bitstream_buffer_ != NULL) |
+ buffers_pending += 1; |
+ if ((size_t)decoder_decode_buffer_tasks_scheduled_ < buffers_pending) { |
+ decoder_decode_buffer_tasks_scheduled_ += 1; |
+ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::DecodeBufferTask, |
+ base::Unretained(this))); |
+ } |
+ } else if (event.data.fd == gsc_fd_) { |
+ if ((event.events & EPOLLERR) != 0) { |
+ DVLOG(2) << "DeviceTask(): epoll() returned EPOLERR for gsc_fd_"; |
+ // Not necessarily an error. |
+ return; |
+ } |
+ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::ServiceDeviceTask, |
+ base::Unretained(this))); |
+ } else { |
+ DLOG(ERROR) << "DeviceTask(): epoll() returned unknown fd"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ } |
+} |
+ |
+void ExynosVideoDecodeAccelerator::NotifyError(Error error) { |
+ DVLOG(2) << "NotifyError()"; |
+ if (child_message_loop_proxy_ != base::MessageLoopProxy::current()) { |
+ child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::NotifyError, weak_this_, error)); |
+ return; |
+ } |
+ |
+ // Post CleanupTask() as a task so we don't recursively acquire any locks we |
+ // might hold. |
+ child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
+ &ExynosVideoDecodeAccelerator::CleanupTask, weak_this_)); |
+ |
+ if (client_) { |
+ client_->NotifyError(error); |
+ client_ptr_factory_.InvalidateWeakPtrs(); |
+ } |
+} |
+ |
+bool ExynosVideoDecodeAccelerator::CreateMfcInputBuffers() { |
+ DVLOG(3) << "CreateMfcInputBuffers()"; |
+ // We always run this as we prepare to initialize. |
+ DCHECK_EQ(decoder_state_, kUninitialized); |
+ DCHECK_EQ(mfc_input_streamon_, false); |
+ DCHECK_EQ(mfc_input_buffer_count_, 0); |
+ |
+ int ret; |
+ |
+ __u32 pixelformat = 0; |
+ if (video_profile_ >= media::H264PROFILE_MIN && |
+ video_profile_ <= media::H264PROFILE_MAX) { |
+ pixelformat = V4L2_PIX_FMT_H264; |
+ } else if (video_profile_ >= media::VP8PROFILE_MIN && |
+ video_profile_ <= media::VP8PROFILE_MAX) { |
+ pixelformat = V4L2_PIX_FMT_VP8; |
+ } |
+ |
+ struct v4l2_format format; |
+ memset(&format, 0, sizeof(format)); |
+ format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ format.fmt.pix_mp.pixelformat = pixelformat; |
+ format.fmt.pix_mp.plane_fmt[0].sizeimage = kMfcInputBufferMaxSize; |
+ format.fmt.pix_mp.num_planes = 1; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_S_FMT, &format); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "CreateMfcInputBuffers(): ioctl() failed: VIDIOC_S_FMT"; |
+ return false; |
+ } |
+ |
+ struct v4l2_requestbuffers reqbufs; |
+ memset(&reqbufs, 0, sizeof(reqbufs)); |
+ reqbufs.count = kMfcInputBufferCount; |
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ reqbufs.memory = V4L2_MEMORY_MMAP; |
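+  // MEMORY_MMAP: the driver allocates the buffers; we mmap() them below so |
+  // we can copy bitstream data into them. |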
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_REQBUFS, &reqbufs); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "CreateMfcInputBuffers(): ioctl() failed: VIDIOC_REQBUFS"; |
+ return false; |
+ } |
+ mfc_input_buffer_count_ = reqbufs.count; |
+ mfc_input_buffer_map_.resize(mfc_input_buffer_count_); |
+ for (int i = 0; i < mfc_input_buffer_count_; i += 1) { |
+ mfc_free_input_buffers_.push_back(i); |
+ |
+ // Query for the MEMORY_MMAP pointer. |
+ struct v4l2_plane planes[1]; |
+ struct v4l2_buffer buffer; |
+ memset(&buffer, 0, sizeof(buffer)); |
+ memset(planes, 0, sizeof(planes)); |
+ buffer.index = i; |
+ buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ buffer.memory = V4L2_MEMORY_MMAP; |
+ buffer.m.planes = planes; |
+ buffer.length = 1; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_QUERYBUF, &buffer); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << |
+ "CreateMfcInputBuffers(): ioctl() failed: VIDIOC_QUERYBUF"; |
+ return false; |
+ } |
+ errno = 0; |
+ void* offset = mmap(NULL, buffer.m.planes[0].length, |
+ PROT_READ | PROT_WRITE, MAP_SHARED, mfc_fd_, |
+ buffer.m.planes[0].m.mem_offset); |
+ if (offset == MAP_FAILED) { |
+ DPLOG(ERROR) << "CreateMfcInputBuffers(): mmap() failed"; |
+ return false; |
+ } |
+ mfc_input_buffer_map_[i].offset = offset; |
+ mfc_input_buffer_map_[i].length = buffer.m.planes[0].length; |
+ } |
+ |
+ return true; |
+} |
+ |
+bool ExynosVideoDecodeAccelerator::CreateMfcOutputBuffers() { |
+ DVLOG(3) << "CreateMfcOutputBuffers()"; |
+ DCHECK_EQ(decoder_state_, kInitialized); |
+ DCHECK_EQ(mfc_output_streamon_, false); |
+ DCHECK_EQ(mfc_output_buffer_count_, 0); |
+ |
+ int ret; |
+ |
+ // Number of MFC output buffers we need. |
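+  // The driver reports the minimum number of CAPTURE buffers it requires to |
+  // decode this stream; we allocate that many plus kMfcOutputBufferExtraCount |
+  // below. |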
+ struct v4l2_control ctrl; |
+ memset(&ctrl, 0, sizeof(ctrl)); |
+ ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_G_CTRL, &ctrl); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "CreateMfcOutputBuffers(): ioctl() failed: VIDIOC_G_CTRL"; |
+ return false; |
+ } |
+ |
+ // Output format setup in Initialize(). |
+ |
+ // Allocate the output buffers. |
+ struct v4l2_requestbuffers reqbufs; |
+ memset(&reqbufs, 0, sizeof(reqbufs)); |
+ reqbufs.count = ctrl.value + kMfcOutputBufferExtraCount; |
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ reqbufs.memory = V4L2_MEMORY_MMAP; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_REQBUFS, &reqbufs); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "CreateMfcOutputBuffers(): ioctl() failed: VIDIOC_REQBUFS"; |
+ return false; |
+ } |
+ |
+ // Fill our free-buffers list, and create DMABUFs from them. |
+ mfc_output_buffer_count_ = reqbufs.count; |
+ mfc_output_buffer_map_.resize(mfc_output_buffer_count_); |
+ for (int i = 0; i < mfc_output_buffer_count_; i += 1) { |
+ mfc_free_output_buffers_.push_back(i); |
+ |
+ // Query for the MEMORY_MMAP pointer. |
+ struct v4l2_plane planes[2]; |
+ struct v4l2_buffer buffer; |
+ memset(&buffer, 0, sizeof(buffer)); |
+ memset(planes, 0, sizeof(planes)); |
+ buffer.index = i; |
+ buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ buffer.memory = V4L2_MEMORY_MMAP; |
+ buffer.m.planes = planes; |
+ buffer.length = 2; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_QUERYBUF, &buffer); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << |
+ "CreateMfcOutputBuffers(): ioctl() failed: VIDIOC_QUERYBUF"; |
+ return false; |
+ } |
+ |
+ // Get their user memory for GSC input. |
+ for (int j = 0; j < 2; j += 1) { |
+ errno = 0; |
+ void* offset = mmap(NULL, buffer.m.planes[j].length, |
+ PROT_READ | PROT_WRITE, MAP_SHARED, mfc_fd_, |
+ buffer.m.planes[j].m.mem_offset); |
+ if (offset == MAP_FAILED) { |
+ DPLOG(ERROR) << "CreateMfcInputBuffers(): mmap() failed"; |
+ return false; |
+ } |
+ mfc_output_buffer_map_[i].offset[j] = offset; |
+ mfc_output_buffer_map_[i].length[j] = buffer.m.planes[j].length; |
+ } |
+ } |
+ |
+ return true; |
+} |
+ |
+bool ExynosVideoDecodeAccelerator::CreateGscInputBuffers() { |
+ DVLOG(3) << "CreateGscInputBuffers()"; |
+ DCHECK_EQ(decoder_state_, kInitialized); |
+ DCHECK_EQ(gsc_input_streamon_, false); |
+ DCHECK_EQ(gsc_input_buffer_count_, 0); |
+ |
+ int ret; |
+ |
+ struct v4l2_format format; |
+ memset(&format, 0, sizeof(format)); |
+ format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ format.fmt.pix_mp.width = frame_buffer_size_.width(); |
+ format.fmt.pix_mp.height = frame_buffer_size_.height(); |
+ DCHECK_EQ(mfc_output_buffer_pixelformat_, V4L2_PIX_FMT_NV12MT_16X16); |
+ format.fmt.pix_mp.pixelformat = mfc_output_buffer_pixelformat_; |
+ format.fmt.pix_mp.plane_fmt[0].sizeimage = mfc_output_buffer_size_[0]; |
+ format.fmt.pix_mp.plane_fmt[1].sizeimage = mfc_output_buffer_size_[1]; |
+  // NV12MT_16X16 is a tiled format for which bytesperline doesn't make too |
+  // much sense. Convention seems to be to assume 8bpp for these tiled |
+  // formats. |
+ format.fmt.pix_mp.plane_fmt[0].bytesperline = frame_buffer_size_.width(); |
+ format.fmt.pix_mp.plane_fmt[1].bytesperline = frame_buffer_size_.width(); |
+ format.fmt.pix_mp.num_planes = 2; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_S_FMT, &format); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "CreateGscInputBuffers(): ioctl() failed: VIDIOC_S_FMT"; |
+ return false; |
+ } |
+ |
+ struct v4l2_control control; |
+ memset(&control, 0, sizeof(control)); |
+ control.id = V4L2_CID_ROTATE; |
+ control.value = 0; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_S_CTRL, &control); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "CreateGscInputBuffers(): ioctl() failed: VIDIOC_S_CTRL"; |
+ return false; |
+ } |
+ |
+ memset(&control, 0, sizeof(control)); |
+ control.id = V4L2_CID_HFLIP; |
+ control.value = 0; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_S_CTRL, &control); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "CreateGscInputBuffers(): ioctl() failed: VIDIOC_S_CTRL"; |
+ return false; |
+ } |
+ |
+ memset(&control, 0, sizeof(control)); |
+ control.id = V4L2_CID_VFLIP; |
+ control.value = 0; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_S_CTRL, &control); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "CreateGscInputBuffers(): ioctl() failed: VIDIOC_S_CTRL"; |
+ return false; |
+ } |
+ |
+ memset(&control, 0, sizeof(control)); |
+ control.id = V4L2_CID_GLOBAL_ALPHA; |
+ control.value = 255; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_S_CTRL, &control); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "CreateGscInputBuffers(): ioctl() failed: VIDIOC_S_CTRL"; |
+ return false; |
+ } |
+ |
+ struct v4l2_requestbuffers reqbufs; |
+ memset(&reqbufs, 0, sizeof(reqbufs)); |
+ reqbufs.count = kGscOutputBufferCount; |
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ reqbufs.memory = V4L2_MEMORY_USERPTR; |
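+  // USERPTR: at QBUF time we point GSC directly at the mmap()ed MFC output |
+  // planes (see CreateMfcOutputBuffers()), avoiding an extra copy. |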
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_REQBUFS, &reqbufs); |
+  if (ret != 0) { |
+ DPLOG(ERROR) << "CreateGscInputBuffers(): ioctl() failed: VIDIOC_REQBUFS"; |
+ return false; |
+ } |
+ |
+ gsc_input_buffer_count_ = reqbufs.count; |
+ gsc_input_buffer_map_.resize(gsc_input_buffer_count_); |
+ for (int i = 0; i < gsc_input_buffer_count_; i += 1) { |
+ gsc_free_input_buffers_.push_back(i); |
+ gsc_input_buffer_map_[i].mfc_output = -1; |
+ } |
+ |
+ return true; |
+} |
+ |
+bool ExynosVideoDecodeAccelerator::CreateGscOutputBuffers() { |
+ DVLOG(3) << "CreateGscOutputBuffers()"; |
+ DCHECK_EQ(decoder_state_, kInitialized); |
+ DCHECK_EQ(gsc_output_streamon_, false); |
+ DCHECK_EQ(gsc_output_buffer_count_, 0); |
+ |
+ int ret; |
+ |
+ // GSC outputs into the EGLImages we create from the textures we are |
+ // assigned. Assume RGBA8888 format. |
+ struct v4l2_format format; |
+ memset(&format, 0, sizeof(format)); |
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ format.fmt.pix_mp.width = frame_buffer_size_.width(); |
+ format.fmt.pix_mp.height = frame_buffer_size_.height(); |
+ format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_RGB32; |
+ format.fmt.pix_mp.plane_fmt[0].sizeimage = |
+ frame_buffer_size_.width() * frame_buffer_size_.height() * 4; |
+ format.fmt.pix_mp.plane_fmt[0].bytesperline = frame_buffer_size_.width() * 4; |
+ format.fmt.pix_mp.num_planes = 1; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_S_FMT, &format); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "CreateGscOutputBuffers(): ioctl() failed: VIDIOC_S_FMT"; |
+ return false; |
+ } |
+ |
+ struct v4l2_requestbuffers reqbufs; |
+ memset(&reqbufs, 0, sizeof(reqbufs)); |
+ reqbufs.count = kGscOutputBufferCount; |
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ reqbufs.memory = V4L2_MEMORY_DMABUF; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_REQBUFS, &reqbufs); |
+ if (ret != 0) { |
+ DPLOG(ERROR) << "CreateGscOutputBuffers(): ioctl() failed: VIDIOC_REQBUFS"; |
+ return false; |
+ } |
+ |
+ // We don't actually fill in the freelist or the map here. That happens once |
+  // we have actual usable buffers, after AssignPictureBuffers(). |
+ gsc_output_buffer_count_ = reqbufs.count; |
+ gsc_output_buffer_map_.resize(gsc_output_buffer_count_); |
+ |
+ DVLOG(3) << "CreateGscOutputBuffers(): ProvidePictureBuffers(): " |
+ "buffer_count=" << gsc_output_buffer_count_ << |
+ ", width=" << frame_buffer_size_.width() << |
+ ", height=" << frame_buffer_size_.height(); |
+ child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
+ &Client::ProvidePictureBuffers, client_, gsc_output_buffer_count_, |
+ gfx::Size(frame_buffer_size_.width(), frame_buffer_size_.height()), |
+ GL_TEXTURE_2D)); |
+ |
+ return true; |
+} |
+ |
+void ExynosVideoDecodeAccelerator::DestroyMfcInputBuffers() { |
+ DVLOG(3) << "DestroyMfcInputBuffers()"; |
+ DCHECK_EQ(child_message_loop_proxy_, base::MessageLoopProxy::current()); |
+ DCHECK_EQ(mfc_input_streamon_, false); |
+ |
+ int ret; |
+ |
+ for (size_t i = 0; i < mfc_input_buffer_map_.size(); i += 1) { |
+ if (mfc_input_buffer_map_[i].offset != NULL) { |
+ munmap(mfc_input_buffer_map_[i].offset, |
+ mfc_input_buffer_map_[i].length); |
+ } |
+ } |
+ |
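+  // A REQBUFS with count == 0 releases all the buffers on this queue. |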
+ struct v4l2_requestbuffers reqbufs; |
+ memset(&reqbufs, 0, sizeof(reqbufs)); |
+ reqbufs.count = 0; |
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+ reqbufs.memory = V4L2_MEMORY_MMAP; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_REQBUFS, &reqbufs); |
+ if (ret != 0) |
+ DPLOG(ERROR) << "DestroyMfcInputBuffers(): ioctl() failed: VIDIOC_REQBUFS"; |
+ |
+ mfc_input_buffer_map_.clear(); |
+ mfc_free_input_buffers_.clear(); |
+ mfc_input_buffer_count_ = 0; |
+} |
+ |
+void ExynosVideoDecodeAccelerator::DestroyMfcOutputBuffers() { |
+ DVLOG(3) << "DestroyMfcOutputBuffers()"; |
+ DCHECK_EQ(child_message_loop_proxy_, base::MessageLoopProxy::current()); |
+ DCHECK_EQ(mfc_output_streamon_, false); |
+ |
+ int ret; |
+ |
+ for (size_t i = 0; i < mfc_output_buffer_map_.size(); i += 1) { |
+ if (mfc_output_buffer_map_[i].offset[0] != NULL) |
+ munmap(mfc_output_buffer_map_[i].offset[0], |
+ mfc_output_buffer_map_[i].length[0]); |
+ if (mfc_output_buffer_map_[i].offset[1] != NULL) |
+ munmap(mfc_output_buffer_map_[i].offset[1], |
+ mfc_output_buffer_map_[i].length[1]); |
+ } |
+ |
+ struct v4l2_requestbuffers reqbufs; |
+ memset(&reqbufs, 0, sizeof(reqbufs)); |
+ reqbufs.count = 0; |
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ reqbufs.memory = V4L2_MEMORY_MMAP; |
+ errno = 0; |
+ ret = ioctl(mfc_fd_, VIDIOC_REQBUFS, &reqbufs); |
+ if (ret != 0) |
+ DPLOG(ERROR) << "DestroyMfcOutputBuffers() ioctl() failed: VIDIOC_REQBUFS"; |
+ |
+ mfc_output_buffer_map_.clear(); |
+ mfc_free_output_buffers_.clear(); |
+ mfc_output_buffer_count_ = 0; |
+} |
+ |
+void ExynosVideoDecodeAccelerator::DestroyGscInputBuffers() { |
+ DVLOG(3) << "DestroyGscInputBuffers()"; |
+ DCHECK_EQ(child_message_loop_proxy_, base::MessageLoopProxy::current()); |
+ DCHECK_EQ(gsc_input_streamon_, false); |
+ |
+ int ret; |
+ |
+ struct v4l2_requestbuffers reqbufs; |
+ memset(&reqbufs, 0, sizeof(reqbufs)); |
+ reqbufs.count = 0; |
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
+  reqbufs.memory = V4L2_MEMORY_USERPTR; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_REQBUFS, &reqbufs); |
+ if (ret != 0) |
+ DPLOG(ERROR) << "DestroyGscInputBuffers(): ioctl() failed: VIDIOC_REQBUFS"; |
+ |
+ gsc_input_buffer_map_.clear(); |
+ gsc_free_input_buffers_.clear(); |
+ gsc_input_buffer_count_ = 0; |
+} |
+ |
+void ExynosVideoDecodeAccelerator::DestroyGscOutputBuffers() { |
+ DVLOG(3) << "DestroyGscOutputBuffers()"; |
+ DCHECK_EQ(child_message_loop_proxy_, base::MessageLoopProxy::current()); |
+ DCHECK_EQ(gsc_output_streamon_, false); |
+ |
+ int ret; |
+ |
+  if (!gsc_output_buffer_map_.empty()) { |
+    if (!make_context_current_.Run()) |
+      DLOG(ERROR) << |
+          "DestroyGscOutputBuffers(): could not make context current"; |
+ |
+    for (size_t i = 0; i < gsc_output_buffer_map_.size(); i += 1) { |
+      GscOutputRecord& output_record = gsc_output_buffer_map_[i]; |
+      if (output_record.fd != -1) |
+        close(output_record.fd); |
+      if (output_record.egl_image != EGL_NO_IMAGE_KHR) |
+        eglDestroyImageKHR(egl_display_, output_record.egl_image); |
+      if (output_record.egl_sync != EGL_NO_SYNC_KHR) |
+        (*egl_destroy_sync_khr)(egl_display_, output_record.egl_sync); |
+      if (client_) |
+        client_->DismissPictureBuffer(output_record.picture_id); |
+    } |
+  } |
+ |
+ struct v4l2_requestbuffers reqbufs; |
+ memset(&reqbufs, 0, sizeof(reqbufs)); |
+ reqbufs.count = 0; |
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ reqbufs.memory = V4L2_MEMORY_DMABUF; |
+ errno = 0; |
+ ret = ioctl(gsc_fd_, VIDIOC_REQBUFS, &reqbufs); |
+ if (ret != 0) |
+ DPLOG(ERROR) << "DestroyGscOutputBuffers(): ioctl() failed: VIDIOC_REQBUFS"; |
+ |
+ gsc_output_buffer_map_.clear(); |
+ gsc_free_output_buffers_.clear(); |
+ gsc_output_buffer_count_ = 0; |
+} |
+ |
+} // namespace content |