|
|
Index: content/common/gpu/media/v4l2_video_decode_accelerator.cc |
diff --git a/content/common/gpu/media/v4l2_video_decode_accelerator.cc b/content/common/gpu/media/v4l2_video_decode_accelerator.cc |
index 285c96b0f8b6cde259b9c4876c5e5bf9531cbebf..ed5d218d16102f37987c1f19580f764e00c5e6b8 100644 |
--- a/content/common/gpu/media/v4l2_video_decode_accelerator.cc |
+++ b/content/common/gpu/media/v4l2_video_decode_accelerator.cc |
@@ -370,16 +370,14 @@ void V4L2VideoDecodeAccelerator::AssignPictureBuffers( |
attrs[13] = output_record.fds[1]; |
attrs[15] = 0; |
attrs[17] = frame_buffer_size_.width(); |
- EGLImageKHR egl_image = eglCreateImageKHR( |
- egl_display_, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attrs); |
+ EGLImageKHR egl_image = device_->CreateEGLImage( |
+ egl_display_, attrs, buffers[i].texture_id(), i); |
if (egl_image == EGL_NO_IMAGE_KHR) { |
DLOG(ERROR) << "AssignPictureBuffers(): could not create EGLImageKHR"; |
Ami GONE FROM CHROMIUM
2014/02/07 09:09:30
already logged in the Device method; unnecessary
already logged in the Device method; unnecessary
|
NOTIFY_ERROR(PLATFORM_FAILURE); |
return; |
} |
- glBindTexture(GL_TEXTURE_EXTERNAL_OES, buffers[i].texture_id()); |
- glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, egl_image); |
Pawel Osciak
2014/02/10 06:36:17
I would like to understand the big picture here please.
I would like to understand the big picture here please.
We strive to stay as close as possible to using (and/or creating)
platform-independent standards where we can, like the sequence above, instead of
providing custom calls for each platform. Removing this from here and TVDA is a
step into an opposite direction, and I would like to understand what technical
difficulties force us to do this first.
Binding textures to EGLImages also serves to keep track of ownership. There are
multiple users of the shared buffer, the renderer, the GPU that renders the
textures, this class and the HW codec. How is ownership/destruction managed and
how is it ensured that the buffer is valid while any of the users are still
referring to/using it (both in userspace and in kernel)?
What happens if the renderer crashes and the codec is writing to the textures?
What happens when this class is destroyed, but the texture is in the renderer?
What happens when the whole Chrome crashes, but the HW codec is using a buffer
(i.e. kernel has ownership)?
Could you please explain how is ownership managed for shared buffers on Tegra?
shivdasp
2014/02/10 13:31:17
The decoder's output buffers are created when REQBUFS(x) is called on CAPTURE_PLANE.
The decoder's output buffers are created when REQBUFS(x) is called on
CAPTURE_PLANE. These buffers are hardware buffers which can be shared with the
AVP processor for decoder to write into.
Then V4L2VDA triggers ProvidePictureBuffers() on which textures are created and
sent back in AssignPictureBuffers().
Now V4L2VDA creates EglImages from these textures and sends each EglImage handle
to library using the QUERYBUF (but can use a custom call too). The tegrav4l2
library cannot create EglImages from DMABUFS like in Exynos since there is no
such extension. We create EglImage from this texture itself so there is a
binding between texture and eglImage.
Now when this EglImage is sent to libtegrav4l2, it is mapped with the
corresponding decoder buffer created in REQBUF() call.
This way there is one map of EglImage, texture and decoder buffer.
When any buffer is enqueued in QBUF, the library sends it down to the decoder.
Once the decoder buffer is ready, the library uses graphics apis to populate the
corresponding EglImage with the RGB data and then pushes into a queue thereby
making it available for DQBUF after which this buffer can be used only when it
is back in QBUF call.
This way the buffer ownership is managed.
So in summary the library uses queues and does all the buffer management between
decoder and the graphics stack for conversion.
On 2014/02/10 06:36:17, Pawel Osciak wrote:
> I would like to understand the big picture here please.
>
> We strive to stay as close as possible to using (and/or creating)
> platform-independent standards where we can, like the sequence above, instead
of
> providing custom calls for each platform. Removing this from here and TVDA is
a
> step into an opposite direction, and I would like to understand what technical
> difficulties force us to do this first.
>
> Binding textures to EGLImages also serves to keep track of ownership. There
are
> multiple users of the shared buffer, the renderer, the GPU that renders the
> textures, this class and the HW codec. How is ownership/destruction managed
and
> how is it ensured that the buffer is valid while any of the users are still
> referring to/using it (both in userspace and in kernel)?
>
> What happens if the renderer crashes and the codec is writing to the textures?
> What happens when this class is destroyed, but the texture is in the renderer?
> What happens when the whole Chrome crashes, but the HW codec is using a buffer
> (i.e. kernel has ownership)?
>
> Could you please explain how is ownership managed for shared buffers on Tegra?
Pawel Osciak
2014/02/12 09:15:13
By decoder do you mean V4L2VDA class?
On 2014/02/10 13:31:17, shivdasp wrote:
> The decoder's output buffers are created when REQBUFS(x) is called on
> CAPTURE_PLANE. These buffers are hardware buffers which can be shared with the
> AVP processor for decoder to write into.
By decoder do you mean V4L2VDA class?
> Then V4L2VDA triggers ProvidePictureBuffers() on which textures are created
and
> sent back in AssignPictureBuffers().
> Now V4L2VDA creates EglImages from these textures and sends each EglImage
handle
> to library using the QUERYBUF (but can use a custom call too). The tegrav4l2
> library cannot create EglImages from DMABUFS like in Exynos since there is no
> such extension. We create EglImage from this texture itself so there is a
> binding between texture and eglImage.
Sounds like the eglCreateImage extension taking offsets I described in the
comment in tegra_v4l2_video_device.cc could work for this?
> Now when this EglImage is sent to libtegrav4l2, it is mapped with the
> corresponding decoder buffer created in REQBUF() call.
> This way there is one map of EglImage, texture and decoder buffer.
My understanding is you mean the buffer is bound to a texture? If so, then it
also seems like we could use the current bind texture to eglimage calls?
> When any buffer is enqueued in QBUF, the library sends it down to the decoder.
> Once the decoder buffer is ready, the library uses graphics apis to populate
the
> corresponding EglImage with the RGB data and then pushes into a queue thereby
> making it available for DQBUF after which this buffer can be used only when it
> is back in QBUF call.
> This way the buffer ownership is managed.
> So in summary the library uses queues and does all the buffer management
between
> decoder and the graphics stack for conversion.
What happens when this class calls REQBUFS(0), but the corresponding textures
are being rendered to the screen?
How will the buffers be freed if the GPU process crashes without calling
REQBUFS(0)?
What happens when the bound textures are deleted, but the HW codec is still
using them?
>
> On 2014/02/10 06:36:17, Pawel Osciak wrote:
> > I would like to understand the big picture here please.
> >
> > We strive to stay as close as possible to using (and/or creating)
> > platform-independent standards where we can, like the sequence above,
instead
> of
> > providing custom calls for each platform. Removing this from here and TVDA
is
> a
> > step into an opposite direction, and I would like to understand what
technical
> > difficulties force us to do this first.
> >
> > Binding textures to EGLImages also serves to keep track of ownership. There
> are
> > multiple users of the shared buffer, the renderer, the GPU that renders the
> > textures, this class and the HW codec. How is ownership/destruction managed
> and
> > how is it ensured that the buffer is valid while any of the users are still
> > referring to/using it (both in userspace and in kernel)?
> >
> > What happens if the renderer crashes and the codec is writing to the
textures?
> > What happens when this class is destroyed, but the texture is in the
renderer?
> > What happens when the whole Chrome crashes, but the HW codec is using a
buffer
> > (i.e. kernel has ownership)?
> >
> > Could you please explain how is ownership managed for shared buffers on
Tegra?
>
shivdasp
2014/02/12 10:11:55
No I meant the decoder entity within the library.
On 2014/02/12 09:15:13, Pawel Osciak wrote:
> On 2014/02/10 13:31:17, shivdasp wrote:
> > The decoder's output buffers are created when REQBUFS(x) is called on
> > CAPTURE_PLANE. These buffers are hardware buffers which can be shared with
the
> > AVP processor for decoder to write into.
>
> By decoder do you mean V4L2VDA class?
No I meant the decoder entity within the library.
>
> > Then V4L2VDA triggers ProvidePictureBuffers() on which textures are created
> and
> > sent back in AssignPictureBuffers().
> > Now V4L2VDA creates EglImages from these textures and sends each EglImage
> handle
> > to library using the QUERYBUF (but can use a custom call too). The tegrav4l2
> > library cannot create EglImages from DMABUFS like in Exynos since there is
no
> > such extension. We create EglImage from this texture itself so there is a
> > binding between texture and eglImage.
>
> Sounds like the eglCreateImage extension taking offsets I described in the
> comment in tegra_v4l2_video_device.cc could work for this?
Unfortunately there is no such extension today.
>
> > Now when this EglImage is sent to libtegrav4l2, it is mapped with the
> > corresponding decoder buffer created in REQBUF() call.
> > This way there is one map of EglImage, texture and decoder buffer.
>
> My understanding is you mean the buffer is bound to a texture? If so, then it
> also seems like we could use the current bind texture to eglimage calls?
The libtegrav4l2 talks to another internal library which actually creates the
YUV buffer. This is what is given to the AVP and where the decoded output is
actually filled.
There is a corresponding RGB buffer created when the EGLImage is called, this is
owned by the graphics library. While enqueuing buffers for CAPTURE PLANE, there
is a conversion performed to do YUV to RGB.
>
> > When any buffer is enqueued in QBUF, the library sends it down to the
decoder.
> > Once the decoder buffer is ready, the library uses graphics apis to populate
> the
> > corresponding EglImage with the RGB data and then pushes into a queue
thereby
> > making it available for DQBUF after which this buffer can be used only when
it
> > is back in QBUF call.
> > This way the buffer ownership is managed.
> > So in summary the library uses queues and does all the buffer management
> between
> > decoder and the graphics stack for conversion.
>
> What happens when this class calls REQBUFS(0), but the corresponding textures
> are being rendered to the screen?
> How will the buffers be freed if the GPU process crashes without calling
> REQBUFS(0)?
> What happens when the bound textures are deleted, but the HW codec is still
> using them?
I guess I am missing something here. I did not understand "REQBUFS(0) is called
but corresponding textures are being rendered ?". Doesn't DestroyOutputBuffers()
call guarantee that buffers on CAPTURE plane are no longer used.
I will confirm about the buffer freeing in gpu process crash scenario.
The last scenario (bound texture are deleted but HW codec is still using them)
is taken care by the conversion step performed using the library. The texture is
bound to the EGlImage. So that binding will fail. Since the libtegrav4l2 has the
EglImage backed by a RGB buffer the conversion can happen. How can I test this
scenario ?
>
> >
> > On 2014/02/10 06:36:17, Pawel Osciak wrote:
> > > I would like to understand the big picture here please.
> > >
> > > We strive to stay as close as possible to using (and/or creating)
> > > platform-independent standards where we can, like the sequence above,
> instead
> > of
> > > providing custom calls for each platform. Removing this from here and TVDA
> is
> > a
> > > step into an opposite direction, and I would like to understand what
> technical
> > > difficulties force us to do this first.
> > >
> > > Binding textures to EGLImages also serves to keep track of ownership.
There
> > are
> > > multiple users of the shared buffer, the renderer, the GPU that renders
the
> > > textures, this class and the HW codec. How is ownership/destruction
managed
> > and
> > > how is it ensured that the buffer is valid while any of the users are
still
> > > referring to/using it (both in userspace and in kernel)?
> > >
> > > What happens if the renderer crashes and the codec is writing to the
> textures?
> > > What happens when this class is destroyed, but the texture is in the
> renderer?
> > > What happens when the whole Chrome crashes, but the HW codec is using a
> buffer
> > > (i.e. kernel has ownership)?
> > >
> > > Could you please explain how is ownership managed for shared buffers on
> Tegra?
> >
>
Pawel Osciak
2014/02/13 10:42:54
So the YUV buffers are tied to the textures somehow?
On 2014/02/12 10:11:55, shivdasp wrote:
> On 2014/02/12 09:15:13, Pawel Osciak wrote:
> > On 2014/02/10 13:31:17, shivdasp wrote:
> > > The decoder's output buffers are created when REQBUFS(x) is called on
> > > CAPTURE_PLANE. These buffers are hardware buffers which can be shared with
> the
> > > AVP processor for decoder to write into.
> >
> > By decoder do you mean V4L2VDA class?
>
> No I meant the decoder entity within the library.
> >
> > > Then V4L2VDA triggers ProvidePictureBuffers() on which textures are
created
> > and
> > > sent back in AssignPictureBuffers().
> > > Now V4L2VDA creates EglImages from these textures and sends each EglImage
> > handle
> > > to library using the QUERYBUF (but can use a custom call too). The
tegrav4l2
> > > library cannot create EglImages from DMABUFS like in Exynos since there is
> no
> > > such extension. We create EglImage from this texture itself so there is a
> > > binding between texture and eglImage.
> >
> > Sounds like the eglCreateImage extension taking offsets I described in the
> > comment in tegra_v4l2_video_device.cc could work for this?
> Unfortunately there is no such extension today.
> >
> > > Now when this EglImage is sent to libtegrav4l2, it is mapped with the
> > > corresponding decoder buffer created in REQBUF() call.
> > > This way there is one map of EglImage, texture and decoder buffer.
> >
> > My understanding is you mean the buffer is bound to a texture? If so, then
it
> > also seems like we could use the current bind texture to eglimage calls?
> The libtegrav4l2 talks to another internal library which actually creates the
> YUV buffer. This is what is given to the AVP and where the decoded output is
> actually filled.
> There is a corresponding RGB buffer created when the EGLImage is called, this
is
> owned by the graphics library. While enqueuing buffers for CAPTURE PLANE,
there
> is a conversion performed to do YUV to RGB.
So the YUV buffers are tied to the textures somehow?
> >
> > > When any buffer is enqueued in QBUF, the library sends it down to the
> decoder.
> > > Once the decoder buffer is ready, the library uses graphics apis to
populate
> > the
> > > corresponding EglImage with the RGB data and then pushes into a queue
> thereby
> > > making it available for DQBUF after which this buffer can be used only
when
> it
> > > is back in QBUF call.
> > > This way the buffer ownership is managed.
> > > So in summary the library uses queues and does all the buffer management
> > between
> > > decoder and the graphics stack for conversion.
> >
> > What happens when this class calls REQBUFS(0), but the corresponding
textures
> > are being rendered to the screen?
> > How will the buffers be freed if the GPU process crashes without calling
> > REQBUFS(0)?
> > What happens when the bound textures are deleted, but the HW codec is still
> > using them?
>
> I guess I am missing something here. I did not understand "REQBUFS(0) is
called
> but corresponding textures are being rendered ?". Doesn't
DestroyOutputBuffers()
> call guarantee that buffers on CAPTURE plane are no longer used.
The underlying memory can still be used as textures in the client of VDA class.
It only guarantees that they are not used anymore by the codec class as
v4l2_buffers.
> I will confirm about the buffer freeing in gpu process crash scenario.
Thanks.
> The last scenario (bound texture are deleted but HW codec is still using them)
> is taken care by the conversion step performed using the library.
> The texture is
> bound to the EGlImage. So that binding will fail. Since the libtegrav4l2 has
the
> EglImage backed by a RGB buffer the conversion can happen. How can I test this
> scenario ?
This is just a case where there is a bug in the code, but my point is that the
ownership should be shared with the kernel as well, so if the userspace (Chrome)
dies, the kernel will properly clean up.
>
> >
> > >
> > > On 2014/02/10 06:36:17, Pawel Osciak wrote:
> > > > I would like to understand the big picture here please.
> > > >
> > > > We strive to stay as close as possible to using (and/or creating)
> > > > platform-independent standards where we can, like the sequence above,
> > instead
> > > of
> > > > providing custom calls for each platform. Removing this from here and
TVDA
> > is
> > > a
> > > > step into an opposite direction, and I would like to understand what
> > technical
> > > > difficulties force us to do this first.
> > > >
> > > > Binding textures to EGLImages also serves to keep track of ownership.
> There
> > > are
> > > > multiple users of the shared buffer, the renderer, the GPU that renders
> the
> > > > textures, this class and the HW codec. How is ownership/destruction
> managed
> > > and
> > > > how is it ensured that the buffer is valid while any of the users are
> still
> > > > referring to/using it (both in userspace and in kernel)?
> > > >
> > > > What happens if the renderer crashes and the codec is writing to the
> > textures?
> > > > What happens when this class is destroyed, but the texture is in the
> > renderer?
> > > > What happens when the whole Chrome crashes, but the HW codec is using a
> > buffer
> > > > (i.e. kernel has ownership)?
> > > >
> > > > Could you please explain how is ownership managed for shared buffers on
> > Tegra?
> > >
> >
>
shivdasp
2014/02/14 03:06:45
We send texture_id to eglCreateImageKHR and bind it there.
On 2014/02/13 10:42:54, Pawel Osciak wrote:
> On 2014/02/12 10:11:55, shivdasp wrote:
> > On 2014/02/12 09:15:13, Pawel Osciak wrote:
> > > On 2014/02/10 13:31:17, shivdasp wrote:
> > > > The decoder's output buffers are created when REQBUFS(x) is called on
> > > > CAPTURE_PLANE. These buffers are hardware buffers which can be shared
with
> > the
> > > > AVP processor for decoder to write into.
> > >
> > > By decoder do you mean V4L2VDA class?
> >
> > No I meant the decoder entity within the library.
> > >
> > > > Then V4L2VDA triggers ProvidePictureBuffers() on which textures are
> created
> > > and
> > > > sent back in AssignPictureBuffers().
> > > > Now V4L2VDA creates EglImages from these textures and sends each
EglImage
> > > handle
> > > > to library using the QUERYBUF (but can use a custom call too). The
> tegrav4l2
> > > > library cannot create EglImages from DMABUFS like in Exynos since there
is
> > no
> > > > such extension. We create EglImage from this texture itself so there is
a
> > > > binding between texture and eglImage.
> > >
> > > Sounds like the eglCreateImage extension taking offsets I described in the
> > > comment in tegra_v4l2_video_device.cc could work for this?
> > Unfortunately there is no such extension today.
> > >
> > > > Now when this EglImage is sent to libtegrav4l2, it is mapped with the
> > > > corresponding decoder buffer created in REQBUF() call.
> > > > This way there is one map of EglImage, texture and decoder buffer.
> > >
> > > My understanding is you mean the buffer is bound to a texture? If so, then
> it
> > > also seems like we could use the current bind texture to eglimage calls?
> > The libtegrav4l2 talks to another internal library which actually creates
the
> > YUV buffer. This is what is given to the AVP and where the decoded output is
> > actually filled.
> > There is a corresponding RGB buffer created when the EGLImage is called,
this
> is
> > owned by the graphics library. While enqueuing buffers for CAPTURE PLANE,
> there
> > is a conversion performed to do YUV to RGB.
>
> So the YUV buffers are tied to the textures somehow?
We send texture_id to eglCreateImageKHR and bind it there. And eglImage is sent
to the library which maps it to its YUV buffer.
My subsequent patch will probably make this clearer.
>
> > >
> > > > When any buffer is enqueued in QBUF, the library sends it down to the
> > decoder.
> > > > Once the decoder buffer is ready, the library uses graphics apis to
> populate
> > > the
> > > > corresponding EglImage with the RGB data and then pushes into a queue
> > thereby
> > > > making it available for DQBUF after which this buffer can be used only
> when
> > it
> > > > is back in QBUF call.
> > > > This way the buffer ownership is managed.
> > > > So in summary the library uses queues and does all the buffer management
> > > between
> > > > decoder and the graphics stack for conversion.
> > >
> > > What happens when this class calls REQBUFS(0), but the corresponding
> textures
> > > are being rendered to the screen?
> > > How will the buffers be freed if the GPU process crashes without calling
> > > REQBUFS(0)?
> > > What happens when the bound textures are deleted, but the HW codec is
still
> > > using them?
> >
> > I guess I am missing something here. I did not understand "REQBUFS(0) is
> called
> > but corresponding textures are being rendered ?". Doesn't
> DestroyOutputBuffers()
> > call guarantee that buffers on CAPTURE plane are no longer used.
>
> The underlying memory can still be used as textures in the client of VDA
class.
> It only guarantees that they are not used anymore by the codec class as
> v4l2_buffers.
>
> > I will confirm about the buffer freeing in gpu process crash scenario.
>
> Thanks.
If the EGLimage is destroyed I think the texture becomes unbound. I was
debugging some scenario and I get errors as "texture not bound or texture id 0"
kind of errors from gles2_cmd_decoder.cc. These I guess represent this crash
scenario. So it is taken care of already while validating the texture before
rendering ?
And I observe similar kind of logs on Exynos too.
Do you have a test case or steps of validating this ? Will killing gpu process
while video playback validate this path ?
>
> > The last scenario (bound texture are deleted but HW codec is still using
them)
> > is taken care by the conversion step performed using the library.
> > The texture is
> > bound to the EGlImage. So that binding will fail. Since the libtegrav4l2 has
> the
> > EglImage backed by a RGB buffer the conversion can happen. How can I test
this
> > scenario ?
>
> This is just a case where there is a bug in the code, but my point is that the
> ownership should be shared with the kernel as well, so if the userspace
(Chrome)
> dies, the kernel will properly clean up.
>
> >
> > >
> > > >
> > > > On 2014/02/10 06:36:17, Pawel Osciak wrote:
> > > > > I would like to understand the big picture here please.
> > > > >
> > > > > We strive to stay as close as possible to using (and/or creating)
> > > > > platform-independent standards where we can, like the sequence above,
> > > instead
> > > > of
> > > > > providing custom calls for each platform. Removing this from here and
> TVDA
> > > is
> > > > a
> > > > > step into an opposite direction, and I would like to understand what
> > > technical
> > > > > difficulties force us to do this first.
> > > > >
> > > > > Binding textures to EGLImages also serves to keep track of ownership.
> > There
> > > > are
> > > > > multiple users of the shared buffer, the renderer, the GPU that
renders
> > the
> > > > > textures, this class and the HW codec. How is ownership/destruction
> > managed
> > > > and
> > > > > how is it ensured that the buffer is valid while any of the users are
> > still
> > > > > referring to/using it (both in userspace and in kernel)?
> > > > >
> > > > > What happens if the renderer crashes and the codec is writing to the
> > > textures?
> > > > > What happens when this class is destroyed, but the texture is in the
> > > renderer?
> > > > > What happens when the whole Chrome crashes, but the HW codec is using
a
> > > buffer
> > > > > (i.e. kernel has ownership)?
> > > > >
> > > > > Could you please explain how is ownership managed for shared buffers
on
> > > Tegra?
> > > >
> > >
> >
>
Pawel Osciak
2014/02/14 07:36:10
Wait, where do you send texture_id to eglCreateImageKHR?
On 2014/02/14 03:06:45, shivdasp wrote:
> On 2014/02/13 10:42:54, Pawel Osciak wrote:
> > On 2014/02/12 10:11:55, shivdasp wrote:
> > > On 2014/02/12 09:15:13, Pawel Osciak wrote:
> > > > On 2014/02/10 13:31:17, shivdasp wrote:
> > > > > The decoder's output buffers are created when REQBUFS(x) is called on
> > > > > CAPTURE_PLANE. These buffers are hardware buffers which can be shared
> with
> > > the
> > > > > AVP processor for decoder to write into.
> > > >
> > > > By decoder do you mean V4L2VDA class?
> > >
> > > No I meant the decoder entity within the library.
> > > >
> > > > > Then V4L2VDA triggers ProvidePictureBuffers() on which textures are
> > created
> > > > and
> > > > > sent back in AssignPictureBuffers().
> > > > > Now V4L2VDA creates EglImages from these textures and sends each
> EglImage
> > > > handle
> > > > > to library using the QUERYBUF (but can use a custom call too). The
> > tegrav4l2
> > > > > library cannot create EglImages from DMABUFS like in Exynos since
there
> is
> > > no
> > > > > such extension. We create EglImage from this texture itself so there
is
> a
> > > > > binding between texture and eglImage.
> > > >
> > > > Sounds like the eglCreateImage extension taking offsets I described in
the
> > > > comment in tegra_v4l2_video_device.cc could work for this?
> > > Unfortunately there is no such extension today.
> > > >
> > > > > Now when this EglImage is sent to libtegrav4l2, it is mapped with the
> > > > > corresponding decoder buffer created in REQBUF() call.
> > > > > This way there is one map of EglImage, texture and decoder buffer.
> > > >
> > > > My understanding is you mean the buffer is bound to a texture? If so,
then
> > it
> > > > also seems like we could use the current bind texture to eglimage calls?
> > > The libtegrav4l2 talks to another internal library which actually creates
> the
> > > YUV buffer. This is what is given to the AVP and where the decoded output
is
> > > actually filled.
> > > There is a corresponding RGB buffer created when the EGLImage is called,
> this
> > is
> > > owned by the graphics library. While enqueuing buffers for CAPTURE PLANE,
> > there
> > > is a conversion performed to do YUV to RGB.
> >
> > So the YUV buffers are tied to the textures somehow?
> We send texture_id to eglCreateImageKHR and bind it there. And eglImage is
sent
> to the library which maps it to its YUV buffer.
> My subsequent patch will probably make this clearer.
Wait, where do you send texture_id to eglCreateImageKHR? I don't see that in the
code above.
Do you have an extension for eglCreateImageKHR to also accept texture ids and
bind during creation? Why not do this in the standard way, i.e. by using
GL_OES_EGL_image_external? I would expect your EGL implementation already has it
for other things (and it's an extension created by NVIDIA too)...
Or did you mean some other function?
> >
> > > >
> > > > > When any buffer is enqueued in QBUF, the library sends it down to the
> > > decoder.
> > > > > Once the decoder buffer is ready, the library uses graphics apis to
> > populate
> > > > the
> > > > > corresponding EglImage with the RGB data and then pushes into a queue
> > > thereby
> > > > > making it available for DQBUF after which this buffer can be used only
> > when
> > > it
> > > > > is back in QBUF call.
> > > > > This way the buffer ownership is managed.
> > > > > So in summary the library uses queues and does all the buffer
management
> > > > between
> > > > > decoder and the graphics stack for conversion.
> > > >
> > > > What happens when this class calls REQBUFS(0), but the corresponding
> > textures
> > > > are being rendered to the screen?
> > > > How will the buffers be freed if the GPU process crashes without calling
> > > > REQBUFS(0)?
> > > > What happens when the bound textures are deleted, but the HW codec is
> still
> > > > using them?
> > >
> > > I guess I am missing something here. I did not understand "REQBUFS(0) is
> > called
> > > but corresponding textures are being rendered ?". Doesn't
> > DestroyOutputBuffers()
> > > call guarantee that buffers on CAPTURE plane are no longer used.
> >
> > The underlying memory can still be used as textures in the client of VDA
> class.
> > It only guarantees that they are not used anymore by the codec class as
> > v4l2_buffers.
> >
> > > I will confirm about the buffer freeing in gpu process crash scenario.
> >
> > Thanks.
> If the EGLimage is destroyed I think the texture becomes unbound. I was
> debugging some scenario and I get errors as "texture not bound or texture id
0"
> kind of errors from gles2_cmd_decoder.cc. These I guess represent this crash
> scenario. So it is taken care of already while validating the texture before
> rendering ?
If you are getting those errors, then there is definitely something wrong going
on.
> And I observe similar kind of logs on Exynos too.
That is even more worrying. Could you please submit a bug for Exynos with repro
steps?
> Do you have a test case or steps of validating this ? Will killing gpu process
> while video playback validate this path ?
It should.
> >
> > > The last scenario (bound texture are deleted but HW codec is still using
> them)
> > > is taken care by the conversion step performed using the library.
> > > The texture is
> > > bound to the EGlImage. So that binding will fail. Since the libtegrav4l2
has
> > the
> > > EglImage backed by a RGB buffer the conversion can happen. How can I test
> this
> > > scenario ?
> >
> > This is just a case where there is a bug in the code, but my point is that
the
> > ownership should be shared with the kernel as well, so if the userspace
> (Chrome)
> > dies, the kernel will properly clean up.
> >
> > >
> > > >
> > > > >
> > > > > On 2014/02/10 06:36:17, Pawel Osciak wrote:
> > > > > > I would like to understand the big picture here please.
> > > > > >
> > > > > > We strive to stay as close as possible to using (and/or creating)
> > > > > > platform-independent standards where we can, like the sequence
above,
> > > > instead
> > > > > of
> > > > > > providing custom calls for each platform. Removing this from here
and
> > TVDA
> > > > is
> > > > > a
> > > > > > step into an opposite direction, and I would like to understand what
> > > > technical
> > > > > > difficulties force us to do this first.
> > > > > >
> > > > > > Binding textures to EGLImages also serves to keep track of
ownership.
> > > There
> > > > > are
> > > > > > multiple users of the shared buffer, the renderer, the GPU that
> renders
> > > the
> > > > > > textures, this class and the HW codec. How is ownership/destruction
> > > managed
> > > > > and
> > > > > > how is it ensured that the buffer is valid while any of the users
are
> > > still
> > > > > > referring to/using it (both in userspace and in kernel)?
> > > > > >
> > > > > > What happens if the renderer crashes and the codec is writing to the
> > > > textures?
> > > > > > What happens when this class is destroyed, but the texture is in the
> > > > renderer?
> > > > > > What happens when the whole Chrome crashes, but the HW codec is
using
> a
> > > > buffer
> > > > > > (i.e. kernel has ownership)?
> > > > > >
> > > > > > Could you please explain how is ownership managed for shared buffers
> on
> > > > Tegra?
> > > > >
> > > >
> > >
> >
>
shivdasp
2014/02/14 09:18:58
The texture_id is sent in eglCreateImageKHR parameter.
On 2014/02/14 07:36:10, Pawel Osciak wrote:
> On 2014/02/14 03:06:45, shivdasp wrote:
> > On 2014/02/13 10:42:54, Pawel Osciak wrote:
> > > On 2014/02/12 10:11:55, shivdasp wrote:
> > > > On 2014/02/12 09:15:13, Pawel Osciak wrote:
> > > > > On 2014/02/10 13:31:17, shivdasp wrote:
> > > > > > The decoder's output buffers are created when REQBUFS(x) is called
on
> > > > > > CAPTURE_PLANE. These buffers are hardware buffers which can be
shared
> > with
> > > > the
> > > > > > AVP processor for decoder to write into.
> > > > >
> > > > > By decoder do you mean V4L2VDA class?
> > > >
> > > > No I meant the decoder entity within the library.
> > > > >
> > > > > > Then V4L2VDA triggers ProvidePictureBuffers() on which textures are
> > > created
> > > > > and
> > > > > > sent back in AssignPictureBuffers().
> > > > > > Now V4L2VDA creates EglImages from these textures and sends each
> > EglImage
> > > > > handle
> > > > > > to library using the QUERYBUF (but can use a custom call too). The
> > > tegrav4l2
> > > > > > library cannot create EglImages from DMABUFS like in Exynos since
> there
> > is
> > > > no
> > > > > > such extension. We create EglImage from this texture itself so there
> is
> > a
> > > > > > binding between texture and eglImage.
> > > > >
> > > > > Sounds like the eglCreateImage extension taking offsets I described in
> the
> > > > > comment in tegra_v4l2_video_device.cc could work for this?
> > > > Unfortunately there is no such extension today.
> > > > >
> > > > > > Now when this EglImage is sent to libtegrav4l2, it is mapped with
the
> > > > > > corresponding decoder buffer created in REQBUF() call.
> > > > > > This way there is one map of EglImage, texture and decoder buffer.
> > > > >
> > > > > My understanding is you mean the buffer is bound to a texture? If so,
> then
> > > it
> > > > > also seems like we could use the current bind texture to eglimage
calls?
> > > > The libtegrav4l2 talks to another internal library which actually
creates
> > the
> > > > YUV buffer. This is what is given to the AVP and where the decoded
output
> is
> > > > actually filled.
> > > > There is a corresponding RGB buffer created when the EGLImage is called,
> > this
> > > is
> > > > owned by the graphics library. While enqueuing buffers for CAPTURE
PLANE,
> > > there
> > > > is a conversion performed to do YUV to RGB.
> > >
> > > So the YUV buffers are tied to the textures somehow?
> > We send texture_id to eglCreateImageKHR and bind it there. And eglImage is
> sent
> > to the library which maps it to its YUV buffer.
> > My subsequent patch will probably make this clearer.
>
> Wait, where do you send texture_id to eglCreateImageKHR? I don't see that in
the
> code above.
> Do you have an extension for eglCreateImageKHR to also accept texture ids and
> bind during creation? Why not do this in the standard way, i.e. by using
> GL_OES_EGL_image_external? I would expect your EGL implementation already has
it
> for other things (and it's an extension created by NVIDIA too)...
> Or did you mean some other function?
The texture_id is sent in eglCreateImageKHR parameter. See TegraV4L2Device
implementation of CreateEGLImage().
I will submit a bug with my findings and repro steps.
>
> > >
> > > > >
> > > > > > When any buffer is enqueued in QBUF, the library sends it down to
the
> > > > decoder.
> > > > > > Once the decoder buffer is ready, the library uses graphics apis to
> > > populate
> > > > > the
> > > > > > corresponding EglImage with the RGB data and then pushes into a
queue
> > > > thereby
> > > > > > making it available for DQBUF after which this buffer can be used
only
> > > when
> > > > it
> > > > > > is back in QBUF call.
> > > > > > This way the buffer ownership is managed.
> > > > > > So in summary the library uses queues and does all the buffer
> management
> > > > > between
> > > > > > decoder and the graphics stack for conversion.
> > > > >
> > > > > What happens when this class calls REQBUFS(0), but the corresponding
> > > textures
> > > > > are being rendered to the screen?
> > > > > How will the buffers be freed if the GPU process crashes without
calling
> > > > > REQBUFS(0)?
> > > > > What happens when the bound textures are deleted, but the HW codec is
> > still
> > > > > using them?
> > > >
> > > > I guess I am missing something here. I did not understand "REQBUFS(0) is
> > > called
> > > > but corresponding textures are being rendered ?". Doesn't
> > > DestroyOutputBuffers()
> > > > call guarantee that buffers on CAPTURE plane are no longer used.
> > >
> > > The underlying memory can still be used as textures in the client of VDA
> > class.
> > > It only guarantees that they are not used anymore by the codec class as
> > > v4l2_buffers.
> > >
> > > > I will confirm about the buffer freeing in gpu process crash scenario.
> > >
> > > Thanks.
> > If the EGLimage is destroyed I think the texture becomes unbound. I was
> > debugging some scenario and I get errors as "texture not bound or texture id
> 0"
> > kind of errors from gles2_cmd_decoder.cc. These I guess represent this crash
> > scenario. So it is taken care of already while validating the texture before
> > rendering ?
>
> If you are getting those errors, then there is definitely something wrong
going
> on.
>
> > And I observe similar kind of logs on Exynos too.
>
> That is even more worrying. Could you please submit a bug for Exynos with
repro
> steps?
>
> > Do you have a test case or steps of validating this ? Will killing gpu
process
> > while video playback validate this path ?
>
> It should.
>
> > >
> > > > The last scenario (bound texture are deleted but HW codec is still using
> > them)
> > > > is taken care by the conversion step performed using the library.
> > > > The texture is
> > > > bound to the EGlImage. So that binding will fail. Since the libtegrav4l2
> has
> > > the
> > > > EglImage backed by a RGB buffer the conversion can happen. How can I
test
> > this
> > > > scenario ?
> > >
> > > This is just a case where there is a bug in the code, but my point is that
> the
> > > ownership should be shared with the kernel as well, so if the userspace
> > (Chrome)
> > > dies, the kernel will properly clean up.
> > >
> > > >
> > > > >
> > > > > >
> > > > > > On 2014/02/10 06:36:17, Pawel Osciak wrote:
> > > > > > > I would like to understand the big picture here please.
> > > > > > >
> > > > > > > We strive to stay as close as possible to using (and/or creating)
> > > > > > > platform-independent standards where we can, like the sequence
> above,
> > > > > instead
> > > > > > of
> > > > > > > providing custom calls for each platform. Removing this from here
> and
> > > TVDA
> > > > > is
> > > > > > a
> > > > > > > step into an opposite direction, and I would like to understand
what
> > > > > technical
> > > > > > > difficulties force us to do this first.
> > > > > > >
> > > > > > > Binding textures to EGLImages also serves to keep track of
> ownership.
> > > > There
> > > > > > are
> > > > > > > multiple users of the shared buffer, the renderer, the GPU that
> > renders
> > > > the
> > > > > > > textures, this class and the HW codec. How is
ownership/destruction
> > > > managed
> > > > > > and
> > > > > > > how is it ensured that the buffer is valid while any of the users
> are
> > > > still
> > > > > > > referring to/using it (both in userspace and in kernel)?
> > > > > > >
> > > > > > > What happens if the renderer crashes and the codec is writing to
the
> > > > > textures?
> > > > > > > What happens when this class is destroyed, but the texture is in
the
> > > > > renderer?
> > > > > > > What happens when the whole Chrome crashes, but the HW codec is
> using
> > a
> > > > > buffer
> > > > > > > (i.e. kernel has ownership)?
> > > > > > >
> > > > > > > Could you please explain how is ownership managed for shared
buffers
> > on
> > > > > Tegra?
> > > > > >
> > > > >
> > > >
> > >
> >
>
|
picture_buffers_ref->picture_buffers.push_back( |
PictureBufferArrayRef::PictureBufferRef(egl_image, buffers[i].id())); |
} |
@@ -1640,7 +1638,8 @@ bool V4L2VideoDecodeAccelerator::GetFormatInfo(struct v4l2_format* format, |
*again = false; |
memset(format, 0, sizeof(*format)); |
format->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
- if (HANDLE_EINTR(device_->Ioctl(VIDIOC_G_FMT, format)) != 0) { |
+ |
+ if (HANDLE_EINTR(device_->Ioctl(VIDIOC_G_FMT, format) != 0)) { |
Ami GONE FROM CHROMIUM
2014/02/07 09:09:30
This is a bug!
This is a bug!
|
if (errno == EINVAL) { |
// EINVAL means we haven't seen sufficient stream to decode the format. |
*again = true; |
@@ -1652,6 +1651,11 @@ bool V4L2VideoDecodeAccelerator::GetFormatInfo(struct v4l2_format* format, |
} |
} |
+ // Since the underlying library at the moment is not updated this hack |
Ami GONE FROM CHROMIUM
2014/02/07 09:09:30
This comment is opaque to me.
Also, if something is a hack, usually there should be a TODO/crbug to go along with it.
This comment is opaque to me.
Also, if something is a hack, usually there should be a TODO/crbug to go along
with it.
Pawel Osciak
2014/02/10 06:36:17
Could this hack be moved into the library itself then?
Could this hack be moved into the library itself then?
This class should not have any awareness of device-specific issues.
shivdasp
2014/02/10 13:31:17
Apologies. This code was not meant to be here. Will remove it.
Apologies. This code was not meant to be here. Will remove it.
On 2014/02/10 06:36:17, Pawel Osciak wrote:
> Could this hack be moved into the library itself then?
> This class should not have any awareness of device-specific issues.
|
+ LOG(ERROR) << "Hardcoding the values here "; |
+ format->fmt.pix_mp.num_planes = 2; |
+ format->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M; |
+ |
return true; |
} |
@@ -1785,7 +1789,7 @@ bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() { |
client_, |
output_buffer_map_.size(), |
frame_buffer_size_, |
- GL_TEXTURE_EXTERNAL_OES)); |
+ device_->GetTextureTarget())); |
return true; |
} |