OLD | NEW |
---|---|
(Empty) | |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "vaapi_h264_decoder.h" | |
6 | |
7 #include <algorithm> | |
8 #include <dlfcn.h> | |
9 | |
10 #include "base/bind.h" | |
11 #include "base/stl_util.h" | |
12 #include "third_party/libva/va/va.h" | |
13 #include "third_party/libva/va/va_x11.h" | |
14 #include "ui/gfx/gl/gl_bindings.h" | |
15 | |
16 #define VA_LOG_ON_ERROR(vares, err_msg) \ | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
in the macro defs you use "vares" but then everywh
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
I question not returning in this case.
The 4 call-
Pawel Osciak
2012/05/03 16:22:07
Of course not :) VAAPI has no documentation whatso
Pawel Osciak
2012/05/03 16:22:07
Done.
| |
17 do { \ | |
18 if ((vares) != VA_STATUS_SUCCESS) { \ | |
19 DVLOG(1) << (err_msg) \ | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
The parens around err_msg are ill-advised. For ex
Pawel Osciak
2012/05/03 16:22:07
Done.
| |
20 << " VA error: " << VAAPI_ErrorStr(vares); \ | |
21 } \ | |
22 } while (0) | |
23 | |
24 #define VA_SUCCESS_OR_RETURN(vares, err_msg, ret) \ | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
Every single invocation of this macro uses a |ret|
Pawel Osciak
2012/05/03 16:22:07
Yeah, there used to be calls returning void. I fee
| |
25 do { \ | |
26 if ((vares) != VA_STATUS_SUCCESS) { \ | |
27 DVLOG(1) << (err_msg) \ | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
This could as well call VA_LOG_ON_ERROR to avoid d
Pawel Osciak
2012/05/03 16:22:07
That would require rechecking va_res anyway, so it
| |
28 << " VA error: " << VAAPI_ErrorStr(vares); \ | |
29 return (ret); \ | |
30 } \ | |
31 } while (0) | |
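
A minimal sketch of the alternative discussed in the threads above (not part of this patch): VA_SUCCESS_OR_RETURN can delegate its logging to VA_LOG_ON_ERROR at the cost of checking |va_res| twice, which is the trade-off Pawel mentions, and the parentheses around err_msg are dropped as Ami suggests so streamed expressions still work.

```cpp
// Sketch only, assuming the same DVLOG/VAAPI_ErrorStr context as this file.
#define VA_LOG_ON_ERROR(va_res, err_msg)                                \
  do {                                                                  \
    if ((va_res) != VA_STATUS_SUCCESS)                                  \
      DVLOG(1) << err_msg << " VA error: " << VAAPI_ErrorStr(va_res);   \
  } while (0)

#define VA_SUCCESS_OR_RETURN(va_res, err_msg, ret)                      \
  do {                                                                  \
    VA_LOG_ON_ERROR(va_res, err_msg);                                   \
    if ((va_res) != VA_STATUS_SUCCESS)                                  \
      return (ret);                                                     \
  } while (0)
```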
32 | |
33 namespace content { | |
34 | |
35 void *vaapi_handle = dlopen("libva.so", RTLD_NOW); | |
36 void *vaapi_x11_handle = dlopen("libva-x11.so", RTLD_NOW); | |
37 void *vaapi_glx_handle = dlopen("libva-glx.so", RTLD_NOW); | |
38 | |
39 typedef VADisplay (*VaapiGetDisplayGLX)(Display *dpy); | |
40 typedef int (*VaapiDisplayIsValid)(VADisplay dpy); | |
41 typedef VAStatus (*VaapiInitialize)(VADisplay dpy, | |
42 int *major_version, | |
43 int *minor_version); | |
44 typedef VAStatus (*VaapiTerminate)(VADisplay dpy); | |
45 typedef VAStatus (*VaapiGetConfigAttributes)(VADisplay dpy, | |
46 VAProfile profile, | |
47 VAEntrypoint entrypoint, | |
48 VAConfigAttrib *attrib_list, | |
49 int num_attribs); | |
50 typedef VAStatus (*VaapiCreateConfig)(VADisplay dpy, | |
51 VAProfile profile, | |
52 VAEntrypoint entrypoint, | |
53 VAConfigAttrib *attrib_list, | |
54 int num_attribs, | |
55 VAConfigID *config_id); | |
56 typedef VAStatus (*VaapiDestroyConfig)(VADisplay dpy, VAConfigID config_id); | |
57 typedef VAStatus (*VaapiCreateSurfaces)(VADisplay dpy, | |
58 int width, | |
59 int height, | |
60 int format, | |
61 int num_surfaces, | |
62 VASurfaceID *surfaces); | |
63 typedef VAStatus (*VaapiDestroySurfaces)(VADisplay dpy, | |
64 VASurfaceID *surfaces, | |
65 int num_surfaces); | |
66 typedef VAStatus (*VaapiCreateContext)(VADisplay dpy, | |
67 VAConfigID config_id, | |
68 int picture_width, | |
69 int picture_height, | |
70 int flag, | |
71 VASurfaceID *render_targets, | |
72 int num_render_targets, | |
73 VAContextID *context); | |
74 typedef VAStatus (*VaapiDestroyContext)(VADisplay dpy, VAContextID context); | |
75 typedef VAStatus (*VaapiPutSurface)(VADisplay dpy, | |
76 VASurfaceID surface, | |
77 Drawable draw, | |
78 short srcx, | |
79 short srcy, | |
80 unsigned short srcw, | |
81 unsigned short srch, | |
82 short destx, | |
83 short desty, | |
84 unsigned short destw, | |
85 unsigned short desth, | |
86 VARectangle *cliprects, | |
87 unsigned int number_cliprects, | |
88 unsigned int flags); | |
89 typedef VAStatus (*VaapiSyncSurface)(VADisplay dpy, VASurfaceID render_target); | |
90 typedef VAStatus (*VaapiBeginPicture)(VADisplay dpy, | |
91 VAContextID context, | |
92 VASurfaceID render_target); | |
93 typedef VAStatus (*VaapiRenderPicture)(VADisplay dpy, | |
94 VAContextID context, | |
95 VABufferID *buffers, | |
96 int num_buffers); | |
97 typedef VAStatus (*VaapiEndPicture)(VADisplay dpy, VAContextID context); | |
98 typedef VAStatus (*VaapiCreateBuffer)(VADisplay dpy, | |
99 VAContextID context, | |
100 VABufferType type, | |
101 unsigned int size, | |
102 unsigned int num_elements, | |
103 void *data, | |
104 VABufferID *buf_id); | |
105 typedef const char* (*VaapiErrorStr)(VAStatus error_status); | |
106 | |
107 #define VAAPI_DLSYM(name, handle) \ | |
108 Vaapi##name VAAPI_##name = \ | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
indent
Pawel Osciak
2012/05/03 16:22:07
Done.
| |
109 reinterpret_cast<Vaapi##name>(dlsym((handle), "va"#name)) | |
110 | |
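
For readers unfamiliar with the token pasting here, the VAAPI_DLSYM invocations below expand roughly as follows (illustration only); each resulting pointer is NULL if dlopen or dlsym failed, which is what AreVaapiFunctionPointersInitialized() later checks.

```cpp
// VAAPI_DLSYM(Initialize, vaapi_handle) expands to approximately:
VaapiInitialize VAAPI_Initialize =
    reinterpret_cast<VaapiInitialize>(dlsym(vaapi_handle, "vaInitialize"));
```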
111 VAAPI_DLSYM(GetDisplayGLX, vaapi_glx_handle); | |
112 VAAPI_DLSYM(DisplayIsValid, vaapi_handle); | |
113 VAAPI_DLSYM(Initialize, vaapi_handle); | |
114 VAAPI_DLSYM(Terminate, vaapi_handle); | |
115 VAAPI_DLSYM(GetConfigAttributes, vaapi_handle); | |
116 VAAPI_DLSYM(CreateConfig, vaapi_handle); | |
117 VAAPI_DLSYM(DestroyConfig, vaapi_handle); | |
118 VAAPI_DLSYM(CreateSurfaces, vaapi_handle); | |
119 VAAPI_DLSYM(DestroySurfaces, vaapi_handle); | |
120 VAAPI_DLSYM(CreateContext, vaapi_handle); | |
121 VAAPI_DLSYM(DestroyContext, vaapi_handle); | |
122 VAAPI_DLSYM(PutSurface, vaapi_x11_handle); | |
123 VAAPI_DLSYM(SyncSurface, vaapi_x11_handle); | |
124 VAAPI_DLSYM(BeginPicture, vaapi_handle); | |
125 VAAPI_DLSYM(RenderPicture, vaapi_handle); | |
126 VAAPI_DLSYM(EndPicture, vaapi_handle); | |
127 VAAPI_DLSYM(CreateBuffer, vaapi_handle); | |
128 VAAPI_DLSYM(ErrorStr, vaapi_handle); | |
129 | |
130 static bool AreVaapiFunctionPointersInitialized() { | |
131 return VAAPI_GetDisplayGLX | |
132 && VAAPI_DisplayIsValid | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
chrome-style puts operators on preceding line
Pawel Osciak
2012/05/03 16:22:07
Sigh, making stuff less readable, but oh well...
Ami GONE FROM CHROMIUM
2012/05/03 23:22:53
haha - I don't think you use the word "readable" i
| |
133 && VAAPI_Initialize | |
134 && VAAPI_Terminate | |
135 && VAAPI_GetConfigAttributes | |
136 && VAAPI_CreateConfig | |
137 && VAAPI_DestroyConfig | |
138 && VAAPI_CreateSurfaces | |
139 && VAAPI_DestroySurfaces | |
140 && VAAPI_CreateContext | |
141 && VAAPI_DestroyContext | |
142 && VAAPI_PutSurface | |
143 && VAAPI_SyncSurface | |
144 && VAAPI_BeginPicture | |
145 && VAAPI_RenderPicture | |
146 && VAAPI_EndPicture | |
147 && VAAPI_CreateBuffer | |
148 && VAAPI_ErrorStr; | |
149 } | |
150 | |
151 class VaapiH264Decoder::DecodeSurface { | |
152 public: | |
153 DecodeSurface(const GLXFBConfig& fb_config, | |
154 Display* x_display, | |
155 VADisplay va_display, | |
156 VASurfaceID va_surface_id, | |
157 int32 picture_buffer_id, | |
158 uint32 texture_id, | |
159 int width, int height); | |
160 ~DecodeSurface(); | |
161 | |
162 VASurfaceID va_surface_id() { | |
163 return va_surface_id_; | |
164 } | |
165 | |
166 int32 picture_buffer_id() { | |
167 return picture_buffer_id_; | |
168 } | |
169 | |
170 uint32 texture_id() { | |
171 return texture_id_; | |
172 } | |
173 | |
174 bool available() { | |
175 return available_; | |
176 } | |
177 | |
178 int32 input_id() { | |
179 return input_id_; | |
180 } | |
181 | |
182 int poc() { | |
183 return poc_; | |
184 } | |
185 | |
186 Pixmap x_pixmap() { | |
187 return x_pixmap_; | |
188 } | |
189 | |
190 // Associate the surface with |input_id| and |poc|, and make it unavailable | |
191 // (in use). | |
192 void Get(int32 input_id, int poc); | |
Ami GONE FROM CHROMIUM
2012/04/09 02:41:47
"Get" with void ret val and no output params is st
Pawel Osciak
2012/05/03 16:22:07
get/put is quite common for resource acquisition,
Ami GONE FROM CHROMIUM
2012/05/03 23:22:53
But Put() *is* releasing a resource!
MarkUsed/Mark
Pawel Osciak
2012/05/06 17:49:19
ok, acquire/release it is :)
| |
193 | |
194 // Make this surface available, ready to be reused. | |
195 void Put(); | |
196 | |
197 // Has to be called before output to sync texture contents. | |
198 // Returns true if successful. | |
199 bool Sync(); | |
200 | |
201 private: | |
202 Display* x_display_; | |
203 VADisplay va_display_; | |
204 VASurfaceID va_surface_id_; | |
205 | |
206 // Client-provided ids. | |
207 int32 input_id_; | |
208 int32 picture_buffer_id_; | |
209 uint32 texture_id_; | |
210 | |
211 int width_; | |
212 int height_; | |
213 | |
214 // Available for decoding (data no longer used for reference or output). | |
215 bool available_; | |
216 | |
217 // PicOrderCount | |
218 int poc_; | |
219 | |
220 // Pixmaps bound to this texture. | |
221 Pixmap x_pixmap_; | |
222 GLXPixmap glx_pixmap_; | |
223 | |
224 DISALLOW_COPY_AND_ASSIGN(DecodeSurface); | |
225 }; | |
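
A self-contained sketch of the availability bookkeeping this class performs, using the Acquire/Release names the thread above settles on; this is a stand-in class for illustration, not the real DecodeSurface.

```cpp
#include <cassert>

class SurfaceSlot {
 public:
  SurfaceSlot() : available_(true), input_id_(-1), poc_(-1) {}
  // Was Get(): associate with |input_id|/|poc| and mark in use.
  void Acquire(int input_id, int poc) {
    assert(available_);
    available_ = false;
    input_id_ = input_id;
    poc_ = poc;
  }
  // Was Put(): return the slot to the free pool (ReusePictureBuffer path).
  void Release() { available_ = true; }
  bool available() const { return available_; }

 private:
  bool available_;
  int input_id_;
  int poc_;
};
```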
226 | |
227 VaapiH264Decoder::DecodeSurface::DecodeSurface(const GLXFBConfig& fb_config, | |
228 Display* x_display, | |
229 VADisplay va_display, | |
230 VASurfaceID va_surface_id, | |
231 int32 picture_buffer_id, | |
232 uint32 texture_id, | |
233 int width, int height) | |
234 : x_display_(x_display), | |
235 va_display_(va_display), | |
236 va_surface_id_(va_surface_id), | |
237 picture_buffer_id_(picture_buffer_id), | |
238 texture_id_(texture_id), | |
239 width_(width), | |
240 height_(height), | |
241 available_(false) { | |
242 // Bind the surface to a texture of the given width and height, | |
243 // allocating pixmaps as needed. | |
244 glEnable(GL_TEXTURE_2D); | |
245 glBindTexture(GL_TEXTURE_2D, texture_id_); | |
246 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); | |
247 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); | |
248 | |
249 XWindowAttributes win_attr; | |
250 int screen = DefaultScreen(x_display_); | |
251 XGetWindowAttributes(x_display_, RootWindow(x_display_, screen), &win_attr); | |
252 x_pixmap_ = XCreatePixmap(x_display_, RootWindow(x_display_, screen), | |
253 width_, height_, win_attr.depth); | |
254 if (!x_pixmap_) { | |
255 DVLOG(1) << "Failed creating an X Pixmap for TFP"; | |
256 return; | |
257 } | |
258 | |
259 static const int pixmap_attr[] = { | |
260 GLX_TEXTURE_TARGET_EXT, GLX_TEXTURE_2D_EXT, | |
261 GLX_TEXTURE_FORMAT_EXT, GLX_TEXTURE_FORMAT_RGB_EXT, | |
262 GL_NONE, | |
263 }; | |
264 | |
265 glx_pixmap_ = glXCreatePixmap(x_display_, fb_config, x_pixmap_, | |
266 pixmap_attr); | |
267 | |
268 glBindTexture(GL_TEXTURE_2D, texture_id_); | |
269 glXBindTexImageEXT(x_display_, glx_pixmap_, GLX_FRONT_LEFT_EXT, NULL); | |
270 | |
271 available_ = true; | |
272 } | |
273 | |
274 VaapiH264Decoder::DecodeSurface::~DecodeSurface() { | |
275 // Unbind surface from texture and deallocate resources. | |
276 glXReleaseTexImageEXT(x_display_, glx_pixmap_, GLX_FRONT_LEFT_EXT); | |
277 glXDestroyGLXPixmap(x_display_, glx_pixmap_); | |
278 XFreePixmap(x_display_, x_pixmap_); | |
279 } | |
280 | |
281 void VaapiH264Decoder::DecodeSurface::Get(int32 input_id, int poc) { | |
282 DCHECK_EQ(available_, true); | |
283 available_ = false; | |
284 input_id_ = input_id; | |
285 poc_ = poc; | |
286 } | |
287 | |
288 void VaapiH264Decoder::DecodeSurface::Put() { | |
289 available_ = true; | |
290 } | |
291 | |
292 bool VaapiH264Decoder::DecodeSurface::Sync() { | |
293 // Put the decoded data into XPixmap bound to the texture. | |
294 VAStatus va_res = VAAPI_PutSurface(va_display_, | |
295 va_surface_id_, x_pixmap_, | |
296 0, 0, width_, height_, | |
297 0, 0, width_, height_, | |
298 NULL, 0, 0); | |
299 VA_SUCCESS_OR_RETURN(va_res, "Failed putting decoded picture to texture", | |
300 false); | |
301 | |
302 // Wait for the data to be put into the buffer so it's ready for output. | |
303 va_res = VAAPI_SyncSurface(va_display_, va_surface_id_); | |
304 VA_SUCCESS_OR_RETURN(va_res, "Failed syncing decoded picture", false); | |
305 | |
306 return true; | |
307 } | |
308 | |
309 VaapiH264Decoder::VaapiH264Decoder() { | |
310 Reset(); | |
311 curr_sps_id_ = -1; | |
312 curr_pps_id_ = -1; | |
313 pic_width_ = -1; | |
314 pic_height_ = -1; | |
315 max_frame_num_ = 0; | |
316 max_pic_num_ = 0; | |
317 max_long_term_frame_idx_ = 0; | |
318 max_pic_order_cnt_lsb_ = 0; | |
319 state_ = kUninitialized; | |
320 } | |
321 | |
322 VaapiH264Decoder::~VaapiH264Decoder() { | |
323 Destroy(); | |
324 } | |
325 | |
326 // This puts the decoder in a state where it keeps stream data and is ready | |
327 // to resume playback from a random location in the stream, but drops all | |
328 // inputs and outputs and makes all surfaces available for use. | |
329 void VaapiH264Decoder::Reset() { | |
330 frame_ready_at_hw_ = false; | |
331 | |
332 curr_pic_.reset(); | |
333 | |
334 frame_num_ = 0; | |
335 prev_frame_num_ = -1; | |
336 prev_frame_num_offset_ = -1; | |
337 | |
338 prev_ref_has_memmgmnt5_ = false; | |
339 prev_ref_top_field_order_cnt_ = -1; | |
340 prev_ref_pic_order_cnt_msb_ = -1; | |
341 prev_ref_pic_order_cnt_lsb_ = -1; | |
342 prev_ref_field_ = H264Picture::FIELD_NONE; | |
343 | |
344 pending_slice_bufs_ = std::queue<VABufferID>(); | |
345 pending_va_bufs_ = std::queue<VABufferID>(); | |
346 | |
347 ref_pic_list0_.clear(); | |
348 ref_pic_list1_.clear(); | |
349 | |
350 poc_to_decode_surfaces_.clear(); | |
351 | |
352 for (DecodeSurfaces::iterator iter = decode_surfaces_.begin(); | |
353 iter != decode_surfaces_.end(); ++iter) | |
354 iter->second->Put(); | |
355 num_available_decode_surfaces_ = decode_surfaces_.size(); | |
356 | |
357 dpb_.Clear(); | |
358 parser_.Reset(); | |
359 | |
360 // Still initialized and ready to decode, unless called from constructor, | |
361 // which will change it back. | |
362 state_ = kAfterReset; | |
363 } | |
364 | |
365 void VaapiH264Decoder::Destroy() { | |
366 VAStatus va_res; | |
367 | |
368 if (state_ == kUninitialized) | |
369 return; | |
370 | |
371 switch (state_) { | |
372 case kDecoding: | |
373 case kAfterReset: | |
374 case kError: | |
375 DestroyVASurfaces(); | |
376 // fallthrough | |
377 case kInitialized: | |
378 va_res = VAAPI_DestroyConfig(va_display_, va_config_id_); | |
379 VA_LOG_ON_ERROR(va_res, "vaDestroyConfig failed"); | |
380 va_res = VAAPI_Terminate(va_display_); | |
381 VA_LOG_ON_ERROR(va_res, "vaTerminate failed"); | |
382 // fallthrough | |
383 case kUninitialized: | |
384 break; | |
385 } | |
386 | |
387 state_ = kUninitialized; | |
388 } | |
389 | |
390 // Maps Profile enum values to VaProfile values. | |
391 bool VaapiH264Decoder::SetProfile(media::VideoCodecProfile profile) { | |
392 switch (profile) { | |
393 case media::H264PROFILE_BASELINE: | |
394 profile_ = VAProfileH264Baseline; | |
395 break; | |
396 case media::H264PROFILE_MAIN: | |
397 profile_ = VAProfileH264Main; | |
398 break; | |
399 case media::H264PROFILE_HIGH: | |
400 profile_ = VAProfileH264High; | |
401 break; | |
402 default: | |
403 return false; | |
404 } | |
405 return true; | |
406 } | |
407 | |
408 class ScopedPtrXFree { | |
409 public: | |
410 void operator()(void* x) const { | |
411 ::XFree(x); | |
412 } | |
413 }; | |
414 | |
415 bool VaapiH264Decoder::InitializeFBConfig() { | |
416 const int fbconfig_attr[] = { | |
417 GLX_DRAWABLE_TYPE, GLX_PIXMAP_BIT, | |
418 GLX_BIND_TO_TEXTURE_TARGETS_EXT, GLX_TEXTURE_2D_BIT_EXT, | |
419 GLX_BIND_TO_TEXTURE_RGB_EXT, GL_TRUE, | |
420 GLX_Y_INVERTED_EXT, GL_TRUE, | |
421 GL_NONE, | |
422 }; | |
423 | |
424 int num_fbconfigs; | |
425 scoped_ptr_malloc<GLXFBConfig, ScopedPtrXFree> glx_fb_configs( | |
426 glXChooseFBConfig(x_display_, DefaultScreen(x_display_), fbconfig_attr, | |
427 &num_fbconfigs)); | |
428 if (!glx_fb_configs.get()) | |
429 return false; | |
430 if (!num_fbconfigs) | |
431 return false; | |
432 | |
433 fb_config_ = glx_fb_configs.get()[0]; | |
434 return true; | |
435 } | |
436 | |
437 bool VaapiH264Decoder::Initialize(media::VideoCodecProfile profile, | |
438 Display* x_display, | |
439 GLXContext glx_context, | |
440 const OutputPicCallback& | |
441 output_pic_callback) { | |
442 DCHECK_EQ(state_, kUninitialized); | |
443 | |
444 output_pic_callback_ = output_pic_callback; | |
445 | |
446 x_display_ = x_display; | |
447 parent_glx_context_ = glx_context; | |
448 | |
449 if (!SetProfile(profile)) { | |
450 DVLOG(1) << "Unsupported profile"; | |
451 return false; | |
452 } | |
453 | |
454 if (!AreVaapiFunctionPointersInitialized()) { | |
455 DVLOG(1) << "Could not load libva"; | |
456 return false; | |
457 } | |
458 | |
459 if (!InitializeFBConfig()) { | |
460 DVLOG(1) << "Could not get a usable FBConfig"; | |
461 return false; | |
462 } | |
463 | |
464 va_display_ = VAAPI_GetDisplayGLX(x_display_); | |
465 if (!VAAPI_DisplayIsValid(va_display_)) { | |
466 DVLOG(1) << "Could not get a valid VA display"; | |
467 return false; | |
468 } | |
469 | |
470 int major_version, minor_version; | |
471 VAStatus va_res; | |
472 va_res = VAAPI_Initialize(va_display_, &major_version, &minor_version); | |
473 VA_SUCCESS_OR_RETURN(va_res, "vaInitialize failed", false); | |
474 DVLOG(1) << "VAAPI version: " << major_version << "." << minor_version; | |
475 | |
476 VAConfigAttrib attrib; | |
477 attrib.type = VAConfigAttribRTFormat; | |
478 | |
479 VAEntrypoint entrypoint = VAEntrypointVLD; | |
480 va_res = VAAPI_GetConfigAttributes(va_display_, profile_, entrypoint, | |
481 &attrib, 1); | |
482 VA_SUCCESS_OR_RETURN(va_res, "vaGetConfigAttributes failed", false); | |
483 | |
484 if (!(attrib.value & VA_RT_FORMAT_YUV420)) { | |
485 DVLOG(1) << "YUV420 not supported"; | |
486 return false; | |
487 } | |
488 | |
489 va_res = VAAPI_CreateConfig(va_display_, profile_, entrypoint, | |
490 &attrib, 1, &va_config_id_); | |
491 VA_SUCCESS_OR_RETURN(va_res, "vaCreateConfig failed", false); | |
492 | |
493 state_ = kInitialized; | |
494 return true; | |
495 } | |
496 | |
497 void VaapiH264Decoder::ReusePictureBuffer(int32 picture_buffer_id) { | |
498 DecodeSurfaces::iterator it = decode_surfaces_.find(picture_buffer_id); | |
499 if (it == decode_surfaces_.end() || it->second->available()) { | |
500 DVLOG(1) << "Asked to reuse an invalid/already available surface"; | |
501 return; | |
502 } | |
503 it->second->Put(); | |
504 ++num_available_decode_surfaces_; | |
505 } | |
506 | |
507 bool VaapiH264Decoder::AssignPictureBuffer(int32 picture_buffer_id, | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
Another example of a method returning bool to sign
Pawel Osciak
2012/05/03 16:22:07
And again because the VDA API doesn't allow return
Ami GONE FROM CHROMIUM
2012/05/03 23:22:53
Sure it does - NotifyError().
Pawel Osciak
2012/05/06 17:49:19
Ok, will NotifyError() instead.
| |
508 uint32 texture_id) { | |
509 DCHECK_EQ(state_, kDecoding); | |
510 | |
511 if (decode_surfaces_.size() >= GetRequiredNumOfPictures()) { | |
512 DVLOG(1) << "Got more surfaces than required"; | |
513 return false; | |
514 } | |
515 | |
516 // This will not work if we start using VDA.DismissPicture() | |
517 linked_ptr<DecodeSurface> dec_surface(new DecodeSurface(fb_config_, | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
move fb_config_ to the next line
Pawel Osciak
2012/05/03 16:22:07
Done, not sure why you prefer it that way though.
Ami GONE FROM CHROMIUM
2012/05/03 23:22:53
Resolved over IM (not my preference, this is style
| |
518 x_display_, va_display_, va_surface_ids_[decode_surfaces_.size()], | |
519 picture_buffer_id, texture_id, pic_width_, pic_height_)); | |
520 if (!dec_surface->available()) { | |
521 DVLOG(1) << "Error creating a decoding surface (binding to texture?)"; | |
522 return false; | |
523 } | |
524 | |
525 DVLOG(2) << "New picture assigned, texture id: " << dec_surface->texture_id() | |
526 << " pic buf id: " << dec_surface->picture_buffer_id() | |
527 << " will use va surface " << dec_surface->va_surface_id(); | |
528 | |
529 bool inserted = decode_surfaces_.insert(std::make_pair(picture_buffer_id, | |
530 dec_surface)).second; | |
531 DCHECK(inserted); | |
532 ++num_available_decode_surfaces_; | |
533 | |
534 return true; | |
535 } | |
536 | |
537 bool VaapiH264Decoder::CreateVASurfaces() { | |
538 DCHECK_NE(pic_width_, -1); | |
539 DCHECK_NE(pic_height_, -1); | |
540 DCHECK_EQ(state_, kInitialized); | |
541 | |
542 // Allocate VASurfaces in driver. | |
543 VAStatus va_res = VAAPI_CreateSurfaces(va_display_, pic_width_, | |
544 pic_height_, VA_RT_FORMAT_YUV420, | |
545 GetRequiredNumOfPictures(), | |
546 va_surface_ids_); | |
547 VA_SUCCESS_OR_RETURN(va_res, "vaCreateSurfaces failed", false); | |
548 | |
549 DCHECK(decode_surfaces_.empty()); | |
550 | |
551 // And create a context associated with them. | |
552 va_res = VAAPI_CreateContext(va_display_, va_config_id_, | |
553 pic_width_, pic_height_, VA_PROGRESSIVE, | |
554 va_surface_ids_, GetRequiredNumOfPictures(), | |
555 &va_context_id_); | |
556 VA_SUCCESS_OR_RETURN(va_res, "vaCreateContext failed", false); | |
557 | |
558 return true; | |
559 } | |
560 | |
561 void VaapiH264Decoder::DestroyVASurfaces() { | |
562 DCHECK(state_ == kDecoding || state_ == kError || state_ == kAfterReset); | |
563 | |
564 decode_surfaces_.clear(); | |
565 | |
566 VAStatus va_res = VAAPI_DestroyContext(va_display_, va_context_id_); | |
567 VA_LOG_ON_ERROR(va_res, "vaDestroyContext failed"); | |
568 | |
569 va_res = VAAPI_DestroySurfaces(va_display_, va_surface_ids_, | |
570 GetRequiredNumOfPictures()); | |
571 VA_LOG_ON_ERROR(va_res, "vaDestroySurfaces failed"); | |
572 } | |
573 | |
574 // Fill |va_pic| with default/neutral values. | |
575 static void InitVAPicture(VAPictureH264* va_pic) { | |
576 memset(va_pic, 0, sizeof(*va_pic)); | |
577 va_pic->picture_id = VA_INVALID_ID; | |
578 va_pic->flags = VA_PICTURE_H264_INVALID; | |
579 } | |
580 | |
581 void VaapiH264Decoder::FillVAPicture(VAPictureH264 *va_pic, H264Picture* pic) { | |
582 POCToDecodeSurfaces::iterator iter = poc_to_decode_surfaces_.find( | |
583 pic->pic_order_cnt); | |
584 if (iter == poc_to_decode_surfaces_.end()) { | |
585 DVLOG(1) << "Could not find surface with POC: " << pic->pic_order_cnt; | |
586 // Cannot provide a ref picture, will corrupt output, but may be able | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
Does this indicate an invalid stream?
(ffmpeg has
Pawel Osciak
2012/05/03 16:22:07
This will just mess up the decoded pictures for a
| |
587 // to recover. | |
588 InitVAPicture(va_pic); | |
589 return; | |
590 } | |
591 | |
592 va_pic->picture_id = iter->second->va_surface_id(); | |
593 va_pic->frame_idx = pic->frame_num; | |
594 va_pic->flags = 0; | |
595 | |
596 switch (pic->field) { | |
597 case H264Picture::FIELD_NONE: | |
598 break; | |
599 case H264Picture::FIELD_TOP: | |
600 va_pic->flags |= VA_PICTURE_H264_TOP_FIELD; | |
601 break; | |
602 case H264Picture::FIELD_BOTTOM: | |
603 va_pic->flags |= VA_PICTURE_H264_BOTTOM_FIELD; | |
604 break; | |
605 } | |
606 | |
607 if (pic->ref) { | |
608 va_pic->flags |= pic->long_term ? VA_PICTURE_H264_LONG_TERM_REFERENCE | |
609 : VA_PICTURE_H264_SHORT_TERM_REFERENCE; | |
610 } | |
611 | |
612 va_pic->TopFieldOrderCnt = pic->top_field_order_cnt; | |
613 va_pic->BottomFieldOrderCnt = pic->bottom_field_order_cnt; | |
614 } | |
615 | |
616 int VaapiH264Decoder::FillVARefFramesFromDPB(VAPictureH264 *va_pics, | |
617 int num_pics) { | |
618 H264Picture::PtrVector::reverse_iterator rit; | |
619 int i; | |
620 | |
621 // Return reference frames in reverse order of insertion. | |
622 // Libva does not document this, but other implementations (e.g. mplayer) | |
623 // do it this way as well. | |
624 for (rit = dpb_.rbegin(), i = 0; rit != dpb_.rend() && i < num_pics; ++rit) { | |
625 if ((*rit)->ref) | |
626 FillVAPicture(&va_pics[i++], *rit); | |
627 } | |
628 | |
629 return i; | |
630 } | |
631 | |
632 // Can only be called when all surfaces are already bound | |
633 // to textures (cannot be run at the same time as AssignPictureBuffer). | |
634 bool VaapiH264Decoder::AssignSurfaceToPoC(int poc) { | |
635 // Find a surface not currently holding data used for reference and/or | |
636 // to be displayed and mark it as used. | |
637 DecodeSurfaces::iterator iter = decode_surfaces_.begin(); | |
638 for (; iter != decode_surfaces_.end(); ++iter) { | |
639 if (iter->second->available()) { | |
640 --num_available_decode_surfaces_; | |
641 DCHECK_GE(num_available_decode_surfaces_, 0); | |
642 | |
643 // Associate with input id and poc and mark as unavailable. | |
644 iter->second->Get(curr_input_id_, poc); | |
645 DVLOG(4) << "Will use surface " << iter->second->va_surface_id() | |
646 << " for POC " << iter->second->poc() | |
647 << " input ID: " << iter->second->input_id(); | |
648 bool inserted = poc_to_decode_surfaces_.insert(std::make_pair(poc, | |
649 iter->second.get())).second; | |
650 DCHECK(inserted); | |
651 return true; | |
652 } | |
653 } | |
654 | |
655 // Could not find an available surface. | |
656 return false; | |
657 } | |
658 | |
659 // Can only be called when all surfaces are already bound | |
660 // to textures (cannot be run at the same time as AssignPictureBuffer). | |
661 VaapiH264Decoder::DecodeSurface* VaapiH264Decoder::UnassignSurfaceFromPoC( | |
662 int poc) { | |
663 DecodeSurface* dec_surface; | |
664 POCToDecodeSurfaces::iterator it = poc_to_decode_surfaces_.find(poc); | |
665 if (it == poc_to_decode_surfaces_.end()) { | |
666 DVLOG(1) << "Asked to unassign an unassigned POC"; | |
667 return NULL; | |
668 } | |
669 dec_surface = it->second; | |
670 DVLOG(4) << "POC " << poc << " no longer using surface " | |
671 << dec_surface->va_surface_id(); | |
672 poc_to_decode_surfaces_.erase(it); | |
673 return dec_surface; | |
674 } | |
675 | |
676 // Fill a VAPictureParameterBufferH264 to be later sent to the HW decoder. | |
677 bool VaapiH264Decoder::SendPPS() { | |
678 const H264PPS* pps = parser_.GetPPS(curr_pps_id_); | |
679 DCHECK(pps); | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
This means "it is a programming error if pps is NU
Pawel Osciak
2012/05/03 16:22:07
Correct, but only because of how the decoder uses
| |
680 | |
681 const H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id); | |
682 DCHECK(sps); | |
683 | |
684 DCHECK(curr_pic_.get()); | |
685 | |
686 VAPictureParameterBufferH264 pic_param; | |
687 memset(&pic_param, 0, sizeof(VAPictureParameterBufferH264)); | |
688 | |
689 #define FromSPSToPP(a) pic_param.a = sps->a; | |
690 #define FromSPSToPP2(a, b) pic_param.b = sps->a; | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
Needs to be named like FROM_SPS_TO_PP2()
but maybe
Pawel Osciak
2012/05/03 16:22:07
Done.
| |
691 FromSPSToPP2(pic_width_in_mbs_minus1, picture_width_in_mbs_minus1); | |
692 // This assumes non-interlaced video | |
693 FromSPSToPP2(pic_height_in_map_units_minus1, picture_height_in_mbs_minus1); | |
694 FromSPSToPP(bit_depth_luma_minus8); | |
695 FromSPSToPP(bit_depth_chroma_minus8); | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
Please #undef'ine macros when you're done w/ them,
Pawel Osciak
2012/05/03 16:22:07
Wow, now you are really nitpicking... :) It just l
Ami GONE FROM CHROMIUM
2012/05/03 23:22:53
No, it increases it because your readers don't hav
| |
696 | |
697 #define FromSPSToPPSF(a) pic_param.seq_fields.bits.a = sps->a; | |
698 #define FromSPSToPPSF2(a, b) pic_param.seq_fields.bits.b = sps->a; | |
699 FromSPSToPPSF(chroma_format_idc); | |
700 FromSPSToPPSF2(separate_colour_plane_flag, residual_colour_transform_flag); | |
701 FromSPSToPPSF(gaps_in_frame_num_value_allowed_flag); | |
702 FromSPSToPPSF(frame_mbs_only_flag); | |
703 FromSPSToPPSF(mb_adaptive_frame_field_flag); | |
704 FromSPSToPPSF(direct_8x8_inference_flag); | |
705 pic_param.seq_fields.bits.MinLumaBiPredSize8x8 = (sps->level_idc >= 31); | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
gut-check: should that really be >=31 or >31 (or >
Pawel Osciak
2012/05/03 16:22:07
>= 31. Per spec.
| |
706 FromSPSToPPSF(log2_max_frame_num_minus4); | |
707 FromSPSToPPSF(pic_order_cnt_type); | |
708 FromSPSToPPSF(log2_max_pic_order_cnt_lsb_minus4); | |
709 FromSPSToPPSF(delta_pic_order_always_zero_flag); | |
710 | |
711 #define FromPPSToPP(a) pic_param.a = pps->a; | |
712 FromPPSToPP(num_slice_groups_minus1); | |
713 pic_param.slice_group_map_type = 0; | |
714 pic_param.slice_group_change_rate_minus1 = 0; | |
715 FromPPSToPP(pic_init_qp_minus26); | |
716 FromPPSToPP(pic_init_qs_minus26); | |
717 FromPPSToPP(chroma_qp_index_offset); | |
718 FromPPSToPP(second_chroma_qp_index_offset); | |
719 | |
720 #define FromPPSToPPPF(a) pic_param.pic_fields.bits.a = pps->a; | |
721 #define FromPPSToPPPF2(a, b) pic_param.pic_fields.bits.b = pps->a; | |
722 FromPPSToPPPF(entropy_coding_mode_flag); | |
723 FromPPSToPPPF(weighted_pred_flag); | |
724 FromPPSToPPPF(weighted_bipred_idc); | |
725 FromPPSToPPPF(transform_8x8_mode_flag); | |
726 | |
727 pic_param.pic_fields.bits.field_pic_flag = 0; | |
728 FromPPSToPPPF(constrained_intra_pred_flag); | |
729 FromPPSToPPPF2(bottom_field_pic_order_in_frame_present_flag, | |
730 pic_order_present_flag); | |
731 FromPPSToPPPF(deblocking_filter_control_present_flag); | |
732 FromPPSToPPPF(redundant_pic_cnt_present_flag); | |
733 pic_param.pic_fields.bits.reference_pic_flag = curr_pic_->ref; | |
734 | |
735 pic_param.frame_num = curr_pic_->frame_num; | |
736 | |
737 InitVAPicture(&pic_param.CurrPic); | |
738 FillVAPicture(&pic_param.CurrPic, curr_pic_.get()); | |
739 | |
740 // Init reference pictures' array. | |
741 for (int i = 0; i < 16; ++i) | |
742 InitVAPicture(&pic_param.ReferenceFrames[i]); | |
743 | |
744 // And fill it with picture info from DPB. | |
745 FillVARefFramesFromDPB(pic_param.ReferenceFrames, | |
746 arraysize(pic_param.ReferenceFrames)); | |
747 | |
748 pic_param.num_ref_frames = sps->max_num_ref_frames; | |
749 | |
750 // Allocate a buffer in driver for this parameter buffer and upload data. | |
751 VABufferID pic_param_buf_id; | |
752 VAStatus va_res = VAAPI_CreateBuffer(va_display_, va_context_id_, | |
753 VAPictureParameterBufferType, | |
754 sizeof(VAPictureParameterBufferH264), | |
755 1, &pic_param, &pic_param_buf_id); | |
756 VA_SUCCESS_OR_RETURN(va_res, "Failed to create a buffer for PPS", false); | |
757 | |
758 // Queue its VA buffer ID to be committed on HW decode run. | |
759 pending_va_bufs_.push(pic_param_buf_id); | |
760 | |
761 return true; | |
762 } | |
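
On the macro-naming and #undef discussion above, a self-contained sketch of the requested pattern with an upper-case macro name that is undefined once the copies are done; the structs are stand-ins, not the real parser/libva types.

```cpp
// Stand-in types for illustration only.
struct SpsStandIn { int bit_depth_luma_minus8; int bit_depth_chroma_minus8; };
struct PicParamStandIn { int bit_depth_luma_minus8; int bit_depth_chroma_minus8; };

static void CopySpsFields(const SpsStandIn* sps, PicParamStandIn* pic_param) {
#define FROM_SPS_TO_PP(a) pic_param->a = sps->a
  FROM_SPS_TO_PP(bit_depth_luma_minus8);
  FROM_SPS_TO_PP(bit_depth_chroma_minus8);
#undef FROM_SPS_TO_PP
}
```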
763 | |
764 // Fill a VAIQMatrixBufferH264 to be later sent to the HW decoder. | |
765 bool VaapiH264Decoder::SendIQMatrix() { | |
766 const H264PPS* pps = parser_.GetPPS(curr_pps_id_); | |
767 DCHECK(pps); | |
768 | |
769 VAIQMatrixBufferH264 iq_matrix_buf; | |
770 memset(&iq_matrix_buf, 0, sizeof(VAIQMatrixBufferH264)); | |
771 | |
772 if (pps->pic_scaling_matrix_present_flag) { | |
773 for (int i = 0; i < 6; ++i) { | |
774 for (int j = 0; j < 16; ++j) | |
775 iq_matrix_buf.ScalingList4x4[i][j] = pps->scaling_list4x4[i][j]; | |
776 } | |
777 | |
778 for (int i = 0; i < 2; ++i) { | |
779 for (int j = 0; j < 64; ++j) | |
780 iq_matrix_buf.ScalingList8x8[i][j] = pps->scaling_list8x8[i][j]; | |
781 } | |
782 } else { | |
783 const H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id); | |
784 DCHECK(sps); | |
785 for (int i = 0; i < 6; ++i) { | |
786 for (int j = 0; j < 16; ++j) | |
787 iq_matrix_buf.ScalingList4x4[i][j] = sps->scaling_list4x4[i][j]; | |
788 } | |
789 | |
790 for (int i = 0; i < 2; ++i) { | |
791 for (int j = 0; j < 64; ++j) | |
792 iq_matrix_buf.ScalingList8x8[i][j] = sps->scaling_list8x8[i][j]; | |
793 } | |
794 } | |
795 | |
796 // Allocate a buffer in driver for this parameter buffer and upload data. | |
797 VABufferID iq_matrix_buf_id; | |
798 VAStatus va_res = VAAPI_CreateBuffer(va_display_, va_context_id_, | |
799 VAIQMatrixBufferType, | |
800 sizeof(VAIQMatrixBufferH264), 1, | |
801 &iq_matrix_buf, &iq_matrix_buf_id); | |
802 VA_SUCCESS_OR_RETURN(va_res, "Failed to create a buffer for IQMatrix", | |
803 false); | |
804 | |
805 // Queue its VA buffer ID to be committed on HW decode run. | |
806 pending_va_bufs_.push(iq_matrix_buf_id); | |
807 | |
808 return true; | |
809 } | |
810 | |
811 bool VaapiH264Decoder::SendVASliceParam(H264SliceHeader* slice_hdr) { | |
812 const H264PPS* pps = parser_.GetPPS(slice_hdr->pic_parameter_set_id); | |
813 DCHECK(pps); | |
814 | |
815 const H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id); | |
816 DCHECK(sps); | |
817 | |
818 VASliceParameterBufferH264 slice_param; | |
819 memset(&slice_param, 0, sizeof(VASliceParameterBufferH264)); | |
820 | |
821 slice_param.slice_data_size = slice_hdr->nalu_size; | |
822 slice_param.slice_data_offset = 0; | |
823 slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL; | |
824 slice_param.slice_data_bit_offset = slice_hdr->header_bit_size; | |
825 | |
826 #define SHDRToSP(a) slice_param.a = slice_hdr->a; | |
827 SHDRToSP(first_mb_in_slice); | |
828 slice_param.slice_type = slice_hdr->slice_type % 5; | |
829 SHDRToSP(direct_spatial_mv_pred_flag); | |
830 | |
831 // TODO posciak: make sure the parser sets these even when the override | |
832 // flags in the slice header are off. | |
833 SHDRToSP(num_ref_idx_l0_active_minus1); | |
834 SHDRToSP(num_ref_idx_l1_active_minus1); | |
835 SHDRToSP(cabac_init_idc); | |
836 SHDRToSP(slice_qp_delta); | |
837 SHDRToSP(disable_deblocking_filter_idc); | |
838 SHDRToSP(slice_alpha_c0_offset_div2); | |
839 SHDRToSP(slice_beta_offset_div2); | |
840 | |
841 if (((IsH264PSlice(slice_hdr) || IsH264SPSlice(slice_hdr)) | |
842 && pps->weighted_pred_flag) | |
843 || (IsH264BSlice(slice_hdr) && pps->weighted_bipred_idc == 1)) { | |
844 SHDRToSP(luma_log2_weight_denom); | |
845 SHDRToSP(chroma_log2_weight_denom); | |
846 | |
847 SHDRToSP(luma_weight_l0_flag); | |
848 SHDRToSP(luma_weight_l1_flag); | |
849 | |
850 SHDRToSP(chroma_weight_l0_flag); | |
851 SHDRToSP(chroma_weight_l1_flag); | |
852 | |
853 for (int i = 0; i <= slice_param.num_ref_idx_l0_active_minus1; ++i) { | |
854 slice_param.luma_weight_l0[i] = | |
855 slice_hdr->pred_weight_table_l0.luma_weight[i]; | |
856 slice_param.luma_offset_l0[i] = | |
857 slice_hdr->pred_weight_table_l0.luma_offset[i]; | |
858 | |
859 for (int j = 0; j < 2; ++j) { | |
860 slice_param.chroma_weight_l0[i][j] = | |
861 slice_hdr->pred_weight_table_l0.chroma_weight[i][j]; | |
862 slice_param.chroma_offset_l0[i][j] = | |
863 slice_hdr->pred_weight_table_l0.chroma_offset[i][j]; | |
864 } | |
865 } | |
866 | |
867 if (IsH264BSlice(slice_hdr)) { | |
868 for (int i = 0; i <= slice_param.num_ref_idx_l1_active_minus1; ++i) { | |
869 slice_param.luma_weight_l1[i] = | |
870 slice_hdr->pred_weight_table_l1.luma_weight[i]; | |
871 slice_param.luma_offset_l1[i] = | |
872 slice_hdr->pred_weight_table_l1.luma_offset[i]; | |
873 | |
874 for (int j = 0; j < 2; ++j) { | |
875 slice_param.chroma_weight_l1[i][j] = | |
876 slice_hdr->pred_weight_table_l1.chroma_weight[i][j]; | |
877 slice_param.chroma_offset_l1[i][j] = | |
878 slice_hdr->pred_weight_table_l1.chroma_offset[i][j]; | |
879 } | |
880 } | |
881 } | |
882 } | |
883 | |
884 for (int i = 0; i < 32; ++i) { | |
885 InitVAPicture(&slice_param.RefPicList0[i]); | |
886 InitVAPicture(&slice_param.RefPicList1[i]); | |
887 } | |
888 | |
889 int i; | |
890 H264Picture::PtrVector::iterator it; | |
891 for (it = ref_pic_list0_.begin(), i = 0; it != ref_pic_list0_.end(); | |
892 ++it, ++i) | |
893 FillVAPicture(&slice_param.RefPicList0[i], *it); | |
894 for (it = ref_pic_list1_.begin(), i = 0; it != ref_pic_list1_.end(); | |
895 ++it, ++i) | |
896 FillVAPicture(&slice_param.RefPicList1[i], *it); | |
897 | |
898 // Allocate a buffer in driver for this parameter buffer and upload data. | |
899 VABufferID slice_param_buf_id; | |
900 VAStatus va_res = VAAPI_CreateBuffer(va_display_, va_context_id_, | |
901 VASliceParameterBufferType, | |
902 sizeof(VASliceParameterBufferH264), | |
903 1, &slice_param, &slice_param_buf_id); | |
904 VA_SUCCESS_OR_RETURN(va_res, "Failed creating a buffer for slice param", | |
905 false); | |
906 | |
907 // Queue its VA buffer ID to be committed on HW decode run. | |
908 pending_slice_bufs_.push(slice_param_buf_id); | |
909 | |
910 return true; | |
911 } | |
912 | |
913 bool VaapiH264Decoder::SendSliceData(const uint8* ptr, size_t size) | |
914 { | |
915 // Can't help it, blame libva... | |
916 void* non_const_ptr = const_cast<uint8*>(ptr); | |
917 | |
918 VABufferID slice_data_buf_id; | |
919 VAStatus va_res = VAAPI_CreateBuffer(va_display_, va_context_id_, | |
920 VASliceDataBufferType, size, 1, | |
921 non_const_ptr, &slice_data_buf_id); | |
922 VA_SUCCESS_OR_RETURN(va_res, "Failed creating a buffer for slice data", | |
923 false); | |
924 | |
925 pending_slice_bufs_.push(slice_data_buf_id); | |
926 return true; | |
927 } | |
928 | |
929 bool VaapiH264Decoder::QueueSlice(H264SliceHeader* slice_hdr) { | |
930 DCHECK(curr_pic_.get()); | |
931 | |
932 if (!SendVASliceParam(slice_hdr)) | |
933 return false; | |
934 | |
935 if (!SendSliceData(slice_hdr->nalu_data, slice_hdr->nalu_size)) | |
936 return false; | |
937 | |
938 return true; | |
939 } | |
940 | |
941 // TODO posciak: start using vaMapBuffer instead of vaCreateBuffer wherever | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
TODO(posciak)
Pawel Osciak
2012/05/03 16:22:07
Done.
| |
942 // possible. | |
943 | |
944 bool VaapiH264Decoder::DecodePicture() { | |
945 DCHECK(!frame_ready_at_hw_); | |
946 DCHECK(curr_pic_.get()); | |
947 | |
948 static const size_t kMaxVABuffers = 32; | |
949 DCHECK_LE(pending_va_bufs_.size(), kMaxVABuffers); | |
950 DCHECK_LE(pending_slice_bufs_.size(), kMaxVABuffers); | |
951 | |
952 DVLOG(4) << "Pending VA bufs to commit: " << pending_va_bufs_.size(); | |
953 DVLOG(4) << "Pending slice bufs to commit: " << pending_slice_bufs_.size(); | |
954 | |
955 // Find the surface associated with the picture to be decoded. | |
956 DCHECK(pending_slice_bufs_.size()); | |
957 DecodeSurface* dec_surface = | |
958 poc_to_decode_surfaces_[curr_pic_->pic_order_cnt]; | |
959 DVLOG(4) << "Decoding POC " << curr_pic_->pic_order_cnt | |
960 << " into surface " << dec_surface->va_surface_id(); | |
961 | |
962 // Get ready to decode into surface. | |
963 VAStatus va_res = VAAPI_BeginPicture(va_display_, va_context_id_, | |
964 dec_surface->va_surface_id()); | |
965 VA_SUCCESS_OR_RETURN(va_res, "vaBeginPicture failed", false); | |
966 | |
967 // Put buffer IDs for pending parameter buffers into buffers[]. | |
968 VABufferID buffers[kMaxVABuffers]; | |
969 size_t num_buffers = pending_va_bufs_.size(); | |
970 for (size_t i = 0; i < num_buffers && i < kMaxVABuffers; ++i) { | |
971 buffers[i] = pending_va_bufs_.front(); | |
972 pending_va_bufs_.pop(); | |
973 } | |
974 | |
975 // And send them to the HW decoder. | |
976 va_res = VAAPI_RenderPicture(va_display_, va_context_id_, buffers, | |
977 num_buffers); | |
978 VA_SUCCESS_OR_RETURN(va_res, "vaRenderPicture for va_bufs failed", false); | |
979 | |
980 DVLOG(4) << "Committed " << num_buffers << " VA buffers"; | |
981 | |
982 // Put buffer IDs for pending slice data buffers into buffers[]. | |
983 num_buffers = pending_slice_bufs_.size(); | |
984 for (size_t i = 0; i < num_buffers && i < kMaxVABuffers; ++i) { | |
985 buffers[i] = pending_slice_bufs_.front(); | |
986 pending_slice_bufs_.pop(); | |
987 } | |
988 | |
989 // And send them to the HW decoder. | |
990 va_res = VAAPI_RenderPicture(va_display_, va_context_id_, buffers, | |
991 num_buffers); | |
992 VA_SUCCESS_OR_RETURN(va_res, "vaRenderPicture for slices failed", false); | |
993 | |
994 DVLOG(4) << "Committed " << num_buffers << " slice buffers"; | |
995 | |
996 // Instruct HW decoder to start processing committed buffers (decode this | |
997 // picture). This does not block until the end of decode. | |
998 va_res = VAAPI_EndPicture(va_display_, va_context_id_); | |
999 VA_SUCCESS_OR_RETURN(va_res, "vaEndPicture failed", false); | |
1000 | |
1001 // Used to notify clients that we had sufficient data to start decoding | |
1002 // a new frame. | |
1003 frame_ready_at_hw_ = true; | |
1004 return true; | |
1005 } | |
1006 | |
1007 | |
1008 bool VaapiH264Decoder::InitCurrPicture(H264SliceHeader* slice_hdr) { | |
1009 DCHECK(curr_pic_.get()); | |
1010 | |
1011 memset(curr_pic_.get(), 0, sizeof(H264Picture)); | |
1012 | |
1013 curr_pic_->idr = slice_hdr->idr_pic_flag; | |
1014 | |
1015 if (slice_hdr->field_pic_flag) { | |
1016 curr_pic_->field = slice_hdr->bottom_field_flag ? H264Picture::FIELD_BOTTOM | |
1017 : H264Picture::FIELD_TOP; | |
1018 } else { | |
1019 curr_pic_->field = H264Picture::FIELD_NONE; | |
1020 } | |
1021 | |
1022 curr_pic_->ref = slice_hdr->nal_ref_idc != 0; | |
1023 // This assumes non-interlaced stream. | |
1024 curr_pic_->frame_num = curr_pic_->pic_num = slice_hdr->frame_num; | |
1025 | |
1026 if (!CalculatePicOrderCounts(slice_hdr)) | |
1027 return false; | |
1028 | |
1029 // Try to get an empty surface to decode this picture to. | |
1030 if (!AssignSurfaceToPoC(curr_pic_->pic_order_cnt)) { | |
1031 DVLOG(1) << "Failed getting a free surface for a picture"; | |
1032 return false; | |
1033 } | |
1034 | |
1035 curr_pic_->long_term_reference_flag = slice_hdr->long_term_reference_flag; | |
1036 curr_pic_->adaptive_ref_pic_marking_mode_flag = | |
1037 slice_hdr->adaptive_ref_pic_marking_mode_flag; | |
1038 | |
1039 // If the slice header indicates we will have to perform reference marking | |
1040 // process after this picture is decoded, store required data for that | |
1041 // purpose. | |
1042 if (slice_hdr->adaptive_ref_pic_marking_mode_flag) { | |
1043 COMPILE_ASSERT(sizeof(curr_pic_->ref_pic_marking) == | |
1044 sizeof(slice_hdr->ref_pic_marking), | |
1045 ref_pic_marking_array_sizes_do_not_match); | |
1046 memcpy(curr_pic_->ref_pic_marking, slice_hdr->ref_pic_marking, | |
1047 sizeof(curr_pic_->ref_pic_marking)); | |
1048 } | |
1049 | |
1050 return true; | |
1051 } | |
1052 | |
1053 bool VaapiH264Decoder::CalculatePicOrderCounts(H264SliceHeader* slice_hdr) { | |
1054 DCHECK_NE(curr_sps_id_, -1); | |
1055 | |
1056 int pic_order_cnt_lsb = slice_hdr->pic_order_cnt_lsb; | |
1057 curr_pic_->pic_order_cnt_lsb = pic_order_cnt_lsb; | |
1058 if (parser_.GetSPS(curr_sps_id_)->pic_order_cnt_type != 0) { | |
1059 DVLOG(1) << "Unsupported pic_order_cnt_type"; | |
1060 return false; | |
1061 } | |
1062 | |
1063 // See spec 8.2.1.1. | |
1064 int prev_pic_order_cnt_msb, prev_pic_order_cnt_lsb; | |
1065 if (slice_hdr->idr_pic_flag) { | |
1066 prev_pic_order_cnt_msb = prev_pic_order_cnt_lsb = 0; | |
1067 } else { | |
1068 if (prev_ref_has_memmgmnt5_) { | |
1069 if (prev_ref_field_ != H264Picture::FIELD_BOTTOM) { | |
1070 prev_pic_order_cnt_msb = 0; | |
1071 prev_pic_order_cnt_lsb = prev_ref_top_field_order_cnt_; | |
1072 } else { | |
1073 prev_pic_order_cnt_msb = 0; | |
1074 prev_pic_order_cnt_lsb = 0; | |
1075 } | |
1076 } else { | |
1077 prev_pic_order_cnt_msb = prev_ref_pic_order_cnt_msb_; | |
1078 prev_pic_order_cnt_lsb = prev_ref_pic_order_cnt_lsb_; | |
1079 } | |
1080 } | |
1081 | |
1082 DCHECK_NE(max_pic_order_cnt_lsb_, 0); | |
1083 if ((pic_order_cnt_lsb < prev_pic_order_cnt_lsb) && | |
1084 (prev_pic_order_cnt_lsb - pic_order_cnt_lsb >= | |
1085 max_pic_order_cnt_lsb_ / 2)) { | |
1086 curr_pic_->pic_order_cnt_msb = prev_pic_order_cnt_msb + | |
1087 max_pic_order_cnt_lsb_; | |
1088 } else if ((pic_order_cnt_lsb > prev_pic_order_cnt_lsb) && | |
1089 (pic_order_cnt_lsb - prev_pic_order_cnt_lsb > | |
1090 max_pic_order_cnt_lsb_ / 2)) { | |
1091 curr_pic_->pic_order_cnt_msb = prev_pic_order_cnt_msb - | |
1092 max_pic_order_cnt_lsb_; | |
1093 } else { | |
1094 curr_pic_->pic_order_cnt_msb = prev_pic_order_cnt_msb; | |
1095 } | |
1096 | |
1097 if (curr_pic_->field != H264Picture::FIELD_BOTTOM) { | |
1098 curr_pic_->top_field_order_cnt = curr_pic_->pic_order_cnt_msb + | |
1099 pic_order_cnt_lsb; | |
1100 } | |
1101 | |
1102 if (curr_pic_->field != H264Picture::FIELD_TOP) { | |
1103 // TODO posciak: perhaps replace with pic->field? | |
1104 if (!slice_hdr->field_pic_flag) { | |
1105 curr_pic_->bottom_field_order_cnt = curr_pic_->top_field_order_cnt + | |
1106 slice_hdr->delta_pic_order_cnt_bottom; | |
1107 } else { | |
1108 curr_pic_->bottom_field_order_cnt = curr_pic_->pic_order_cnt_msb + | |
1109 pic_order_cnt_lsb; | |
1110 } | |
1111 } | |
1112 | |
1113 switch (curr_pic_->field) { | |
1114 case H264Picture::FIELD_NONE: | |
1115 curr_pic_->pic_order_cnt = std::min(curr_pic_->top_field_order_cnt, | |
1116 curr_pic_->bottom_field_order_cnt); | |
1117 break; | |
1118 case H264Picture::FIELD_TOP: | |
1119 curr_pic_->pic_order_cnt = curr_pic_->top_field_order_cnt; | |
1120 break; | |
1121 case H264Picture::FIELD_BOTTOM: | |
1122 curr_pic_->pic_order_cnt = curr_pic_->bottom_field_order_cnt; | |
1123 break; | |
1124 } | |
1125 | |
1126 return true; | |
1127 } | |
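
The MSB wrap in the function above (spec 8.2.1.1) is easier to see restated on its own; a standalone sketch with a worked example using assumed values, not real stream data.

```cpp
// max_pic_order_cnt_lsb = 2^(log2_max_pic_order_cnt_lsb_minus4 + 4).
int ComputePicOrderCntMsb(int pic_order_cnt_lsb,
                          int prev_pic_order_cnt_lsb,
                          int prev_pic_order_cnt_msb,
                          int max_pic_order_cnt_lsb) {
  if (pic_order_cnt_lsb < prev_pic_order_cnt_lsb &&
      prev_pic_order_cnt_lsb - pic_order_cnt_lsb >= max_pic_order_cnt_lsb / 2)
    return prev_pic_order_cnt_msb + max_pic_order_cnt_lsb;  // wrapped forward
  if (pic_order_cnt_lsb > prev_pic_order_cnt_lsb &&
      pic_order_cnt_lsb - prev_pic_order_cnt_lsb > max_pic_order_cnt_lsb / 2)
    return prev_pic_order_cnt_msb - max_pic_order_cnt_lsb;  // wrapped backward
  return prev_pic_order_cnt_msb;
}
// Example with max_pic_order_cnt_lsb = 16: prev lsb/msb = 14/0 and a new lsb
// of 2 yields msb = 16, so the POC advances 14 -> 18 rather than dropping to 2.
```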
1128 | |
1129 void VaapiH264Decoder::UpdatePicNums() { | |
1130 for (H264Picture::PtrVector::iterator it = dpb_.begin(); it != dpb_.end(); | |
1131 ++it) { | |
1132 H264Picture* pic = *it; | |
1133 DCHECK(pic); | |
1134 if (!pic->ref) | |
1135 continue; | |
1136 | |
1137 // Below assumes non-interlaced stream. | |
1138 DCHECK_EQ(pic->field, H264Picture::FIELD_NONE); | |
1139 if (pic->long_term) { | |
1140 pic->long_term_pic_num = pic->long_term_frame_idx; | |
1141 } else { | |
1142 if (pic->frame_num > frame_num_) | |
1143 pic->frame_num_wrap = pic->frame_num - max_frame_num_; | |
1144 else | |
1145 pic->frame_num_wrap = pic->frame_num; | |
1146 | |
1147 pic->pic_num = pic->frame_num_wrap; | |
1148 } | |
1149 } | |
1150 } | |
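
Similarly, a small illustration of the frame_num wrap applied in UpdatePicNums(); the values are assumed for illustration, and max_frame_num comes from log2_max_frame_num_minus4 in the SPS.

```cpp
#include <cassert>

// Mirrors the wrap rule above; standalone, not the decoder's code.
static int FrameNumWrap(int pic_frame_num, int curr_frame_num,
                        int max_frame_num) {
  return pic_frame_num > curr_frame_num ? pic_frame_num - max_frame_num
                                        : pic_frame_num;
}

int main() {
  // log2_max_frame_num_minus4 == 0  =>  max_frame_num == 16.
  assert(FrameNumWrap(14, 2, 16) == -2);  // wrapped: treated as an older frame
  assert(FrameNumWrap(1, 2, 16) == 1);    // not wrapped
  return 0;
}
```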
1151 | |
1152 struct PicNumDescCompare { | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
I'm not as smooth w/ STL as I might be, but I thou
Pawel Osciak
2012/05/03 16:22:07
It can be easier for the compiler to optimize out
Ami GONE FROM CHROMIUM
2012/05/03 23:22:53
You've just given the classical justification for
Pawel Osciak
2012/05/06 17:49:19
Ok. I'm leaving it this way though, any C++ progra
| |
1153 bool operator()(const H264Picture* a, const H264Picture* b) const { | |
1154 return a->pic_num > b->pic_num; | |
1155 } | |
1156 }; | |
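
Re the thread above: std::sort accepts a plain function as well; the functor form only gives the compiler a distinct type to inline through. A side-by-side sketch with a stand-in struct shows both forms are interchangeable here.

```cpp
#include <algorithm>
#include <vector>

struct Pic { int pic_num; };  // stand-in for H264Picture

static bool PicNumGreater(const Pic* a, const Pic* b) {
  return a->pic_num > b->pic_num;
}

struct PicNumGreaterFunctor {
  bool operator()(const Pic* a, const Pic* b) const {
    return a->pic_num > b->pic_num;
  }
};

void SortBothWays(std::vector<Pic*>* pics) {
  std::sort(pics->begin(), pics->end(), PicNumGreater);           // free function
  std::sort(pics->begin(), pics->end(), PicNumGreaterFunctor());  // functor
}
```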
1157 | |
1158 struct LongTermPicNumAscCompare { | |
1159 bool operator()(const H264Picture* a, const H264Picture* b) const { | |
1160 return a->long_term_pic_num < b->long_term_pic_num; | |
1161 } | |
1162 }; | |
1163 | |
1164 void VaapiH264Decoder::ConstructReferencePicListsP(H264SliceHeader* slice_hdr) { | |
1165 // RefPicList0 (8.2.4.2.1) [[1] [2]], where: | |
1166 // [1] shortterm ref pics sorted by descending pic_num, | |
1167 // [2] longterm ref pics by ascending long_term_pic_num. | |
1168 DCHECK(ref_pic_list0_.empty() && ref_pic_list1_.empty()); | |
1169 // First get the short ref pics... | |
1170 dpb_.GetShortTermRefPicsAppending(ref_pic_list0_); | |
1171 size_t num_short_refs = ref_pic_list0_.size(); | |
1172 | |
1173 // and sort them to get [1]. | |
1174 std::sort(ref_pic_list0_.begin(), ref_pic_list0_.end(), PicNumDescCompare()); | |
1175 | |
1176 // Now get long term pics and sort them by long_term_pic_num to get [2]. | |
1177 dpb_.GetLongTermRefPicsAppending(ref_pic_list0_); | |
1178 std::sort(ref_pic_list0_.begin() + num_short_refs, ref_pic_list0_.end(), | |
1179 LongTermPicNumAscCompare()); | |
1180 | |
1181 // Cut off if we have more than requested in slice header. | |
1182 ref_pic_list0_.resize(slice_hdr->num_ref_idx_l0_active_minus1 + 1); | |
1183 } | |
1184 | |
1185 struct POCAscCompare { | |
1186 bool operator()(const H264Picture* a, const H264Picture* b) const { | |
1187 return a->pic_order_cnt < b->pic_order_cnt; | |
1188 } | |
1189 }; | |
1190 | |
1191 struct POCDescCompare { | |
1192 bool operator()(const H264Picture* a, const H264Picture* b) const { | |
1193 return a->pic_order_cnt > b->pic_order_cnt; | |
1194 } | |
1195 }; | |
1196 | |
1197 void VaapiH264Decoder::ConstructReferencePicListsB(H264SliceHeader* slice_hdr) { | |
1198 // RefPicList0 (8.2.4.2.3) [[1] [2] [3]], where: | |
1199 // [1] shortterm ref pics with POC < curr_pic's POC sorted by descending POC, | |
1200 // [2] shortterm ref pics with POC > curr_pic's POC by ascending POC, | |
1201 // [3] longterm ref pics by ascending long_term_pic_num. | |
1202 DCHECK(ref_pic_list0_.empty() && ref_pic_list1_.empty()); | |
1203 dpb_.GetShortTermRefPicsAppending(ref_pic_list0_); | |
1204 size_t num_short_refs = ref_pic_list0_.size(); | |
1205 | |
1206 // First sort ascending, this will put [1] in right place and finish [2]. | |
1207 std::sort(ref_pic_list0_.begin(), ref_pic_list0_.end(), POCAscCompare()); | |
1208 | |
1209 // Find first with POC > curr_pic's POC to get first element in [2]... | |
1210 H264Picture::PtrVector::iterator iter; | |
1211 iter = std::upper_bound(ref_pic_list0_.begin(), ref_pic_list0_.end(), | |
1212 curr_pic_.get(), POCAscCompare()); | |
1213 | |
1214 // and sort [1] descending, thus finishing sequence [1] [2]. | |
1215 std::sort(ref_pic_list0_.begin(), iter, POCDescCompare()); | |
1216 | |
1217 // Now add [3] and sort by ascending long_term_pic_num. | |
1218 dpb_.GetLongTermRefPicsAppending(ref_pic_list0_); | |
1219 std::sort(ref_pic_list0_.begin() + num_short_refs, ref_pic_list0_.end(), | |
1220 LongTermPicNumAscCompare()); | |
1221 | |
1222 // RefPicList1 (8.2.4.2.4) [[1] [2] [3]], where: | |
1223 // [1] shortterm ref pics with POC > curr_pic's POC sorted by ascending POC, | |
1224 // [2] shortterm ref pics with POC < curr_pic's POC by descending POC, | |
1225 // [3] longterm ref pics by ascending long_term_pic_num. | |
1226 | |
1227 dpb_.GetShortTermRefPicsAppending(ref_pic_list1_); | |
1228 num_short_refs = ref_pic_list1_.size(); | |
1229 | |
1230 // First sort by descending POC. | |
1231 std::sort(ref_pic_list1_.begin(), ref_pic_list1_.end(), POCDescCompare()); | |
1232 | |
1233 // Find first with POC < curr_pic's POC to get first element in [2]... | |
1234 iter = std::upper_bound(ref_pic_list1_.begin(), ref_pic_list1_.end(), | |
1235 curr_pic_.get(), POCDescCompare()); | |
1236 | |
1237 // and sort [1] ascending. | |
1238 std::sort(ref_pic_list1_.begin(), iter, POCAscCompare()); | |
1239 | |
1240 // Now add [3] and sort by ascending long_term_pic_num | |
1241 dpb_.GetLongTermRefPicsAppending(ref_pic_list1_); | |
1242 std::sort(ref_pic_list1_.begin() + num_short_refs, ref_pic_list1_.end(), | |
1243 LongTermPicNumAscCompare()); | |
1244 | |
1245 // If lists identical, swap first two entries in RefPicList1 (spec 8.2.4.2.3) | |
1246 if (ref_pic_list1_.size() > 1 && | |
1247 std::equal(ref_pic_list0_.begin(), ref_pic_list0_.end(), | |
1248 ref_pic_list1_.begin())) | |
1249 std::swap(ref_pic_list1_[0], ref_pic_list1_[1]); | |
1250 | |
1251 // Per 8.2.4.2 it's possible for num_ref_idx_lX_active_minus1 to indicate | |
1252 // there should be more ref pics in the list than we constructed. | |
1253 // Those superfluous ones should be treated as non-reference. | |
1254 ref_pic_list0_.resize(slice_hdr->num_ref_idx_l0_active_minus1 + 1); | |
1255 ref_pic_list1_.resize(slice_hdr->num_ref_idx_l1_active_minus1 + 1); | |
1256 } | |
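
A worked example of the RefPicList0/RefPicList1 ordering built above, with assumed POCs (short-term refs 2, 4, 8, 10 in the DPB, current POC 6, no long-term pictures); stand-in types and free-function comparators for brevity.

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

struct Pic { int poc; };  // stand-in for H264Picture
static bool PocLess(const Pic* a, const Pic* b) { return a->poc < b->poc; }
static bool PocGreater(const Pic* a, const Pic* b) { return a->poc > b->poc; }

int main() {
  Pic p2 = {2}, p4 = {4}, p8 = {8}, p10 = {10}, curr = {6};
  Pic* short_refs[] = {&p8, &p2, &p10, &p4};  // DPB order does not matter.
  std::vector<Pic*> list0(short_refs, short_refs + 4);
  std::vector<Pic*> list1 = list0;

  // RefPicList0: POC < curr descending, then POC > curr ascending.
  std::sort(list0.begin(), list0.end(), PocLess);
  std::vector<Pic*>::iterator it =
      std::upper_bound(list0.begin(), list0.end(), &curr, PocLess);
  std::sort(list0.begin(), it, PocGreater);
  assert(list0[0]->poc == 4 && list0[1]->poc == 2 &&
         list0[2]->poc == 8 && list0[3]->poc == 10);

  // RefPicList1: POC > curr ascending, then POC < curr descending.
  std::sort(list1.begin(), list1.end(), PocGreater);
  it = std::upper_bound(list1.begin(), list1.end(), &curr, PocGreater);
  std::sort(list1.begin(), it, PocLess);
  assert(list1[0]->poc == 8 && list1[1]->poc == 10 &&
         list1[2]->poc == 4 && list1[3]->poc == 2);
  return 0;
}
```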
1257 | |
1258 // See 8.2.4 | |
1259 int VaapiH264Decoder::PicNumF(H264Picture *pic) { | |
1260 if (!pic) | |
1261 return -1; | |
1262 | |
1263 if (!pic->long_term) | |
1264 return pic->pic_num; | |
1265 else | |
1266 return max_pic_num_; | |
1267 } | |
1268 | |
1269 // See 8.2.4 | |
1270 int VaapiH264Decoder::LongTermPicNumF(H264Picture *pic) { | |
1271 if (pic->ref && pic->long_term) | |
1272 return pic->long_term_pic_num; | |
1273 else | |
1274 return 2 * (max_long_term_frame_idx_ + 1); | |
1275 } | |
1276 | |
1277 // Shift elements of |v| from position |from| to |to|, inclusive, | |
1278 // one position to the right and insert |pic| at |from|. | |
1279 static void ShiftRightAndInsert(H264Picture::PtrVector& v, | |
1280 int from, | |
1281 int to, | |
1282 H264Picture* pic) { | |
1283 DCHECK(pic); | |
1284 for (int i = to + 1; i > from; --i) | |
1285 v[i] = v[i - 1]; | |
1286 | |
1287 v[from] = pic; | |
1288 } | |
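
A toy run of the shift-and-insert logic above on plain ints (hypothetical values); note that |v| needs a valid slot at |to|+1 to receive the element shifted off the end.

```cpp
#include <cassert>
#include <vector>

// Same logic as above, on int for the toy case.
static void ShiftRightAndInsertInt(std::vector<int>& v, int from, int to,
                                   int x) {
  for (int i = to + 1; i > from; --i)
    v[i] = v[i - 1];
  v[from] = x;
}

int main() {
  std::vector<int> v;
  v.push_back(1); v.push_back(2); v.push_back(3); v.push_back(4);
  ShiftRightAndInsertInt(v, 1, 2, 9);  // insert 9 at index 1, shift 1..2 right
  assert(v[0] == 1 && v[1] == 9 && v[2] == 2 && v[3] == 3);  // old v[3] is gone
  return 0;
}
```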
1289 | |
1290 bool VaapiH264Decoder::ModifyReferencePicList(H264SliceHeader *slice_hdr, | |
1291 int list) { | |
1292 int num_ref_idx_lX_active_minus1; | |
1293 H264Picture::PtrVector* ref_pic_listx; | |
1294 H264ModificationOfPicNum* list_mod; | |
1295 | |
1296 // This can process either ref_pic_list0 or ref_pic_list1, depending on | |
1297 // the list argument. Set up pointers to proper list to be processed here. | |
1298 if (list == 0) { | |
1299 if (!slice_hdr->ref_pic_list_modification_flag_l0) | |
1300 return true; | |
1301 | |
1302 list_mod = slice_hdr->ref_list_l0_modifications; | |
1303 num_ref_idx_lX_active_minus1 = ref_pic_list0_.size() - 1; | |
1304 | |
1305 ref_pic_listx = &ref_pic_list0_; | |
1306 } else { | |
1307 if (!slice_hdr->ref_pic_list_modification_flag_l1) | |
1308 return true; | |
1309 | |
1310 list_mod = slice_hdr->ref_list_l1_modifications; | |
1311 num_ref_idx_lX_active_minus1 = ref_pic_list1_.size() - 1; | |
1312 | |
1313 ref_pic_listx = &ref_pic_list1_; | |
1314 } | |
1315 | |
1316 DCHECK_GT(num_ref_idx_lX_active_minus1, 0); | |
1317 | |
1318 // Spec 8.2.4.3: | |
1319 // Reorder pictures on the list in a way specified in the stream. | |
1320 int pic_num_lx_pred = curr_pic_->pic_num; | |
1321 int ref_idx_lx = 0; | |
1322 int pic_num_lx_no_wrap; | |
1323 int pic_num_lx; | |
1324 for (int i = 0; i < H264SliceHeader::kRefListModSize; ++i) { | |
1325 switch (list_mod->modification_of_pic_nums_idc) { | |
1326 case 0: | |
1327 case 1: | |
1328 // Modify short reference picture position. | |
1329 if (list_mod->modification_of_pic_nums_idc == 0) { | |
1330 // Subtract given value from predicted PicNum. | |
1331 pic_num_lx_no_wrap = pic_num_lx_pred - | |
1332 (static_cast<int>(list_mod->abs_diff_pic_num_minus1) + 1); | |
1333 // Wrap around max_pic_num_ if it becomes < 0 as result | |
1334 // of subtraction. | |
1335 if (pic_num_lx_no_wrap < 0) | |
1336 pic_num_lx_no_wrap += max_pic_num_; | |
1337 } else { | |
1338 // Add given value to predicted PicNum. | |
1339 pic_num_lx_no_wrap = pic_num_lx_pred + | |
1340 (static_cast<int>(list_mod->abs_diff_pic_num_minus1) + 1); | |
1341 // Wrap around max_pic_num_ if it becomes >= max_pic_num_ as result | |
1342 // of the addition. | |
1343 if (pic_num_lx_no_wrap >= max_pic_num_) | |
1344 pic_num_lx_no_wrap -= max_pic_num_; | |
1345 } | |
1346 | |
1347 // For use in next iteration. | |
1348 pic_num_lx_pred = pic_num_lx_no_wrap; | |
1349 | |
1350 if (pic_num_lx_no_wrap > curr_pic_->pic_num) | |
1351 pic_num_lx = pic_num_lx_no_wrap - max_pic_num_; | |
1352 else | |
1353 pic_num_lx = pic_num_lx_no_wrap; | |
1354 | |
1355 DCHECK_LT(num_ref_idx_lX_active_minus1 + 1, | |
1356 H264SliceHeader::kRefListModSize); | |
1357 ShiftRightAndInsert(*ref_pic_listx, ref_idx_lx, | |
1358 num_ref_idx_lX_active_minus1, | |
1359 dpb_.GetShortRefPicByPicNum(pic_num_lx)); | |
1360 | |
1361 ref_idx_lx++; | |
1362 | |
1363 for (int src = ref_idx_lx, dst = ref_idx_lx; | |
1364 src <= num_ref_idx_lX_active_minus1 + 1; ++src) { | |
1365 if (PicNumF((*ref_pic_listx)[src]) != pic_num_lx) | |
1366 (*ref_pic_listx)[dst++] = (*ref_pic_listx)[src]; | |
1367 } | |
1368 break; | |
1369 | |
1370 case 2: | |
1371 // Modify long term reference picture position. | |
1372 DCHECK_LT(num_ref_idx_lX_active_minus1 + 1, | |
1373 H264SliceHeader::kRefListModSize); | |
1374 ShiftRightAndInsert(*ref_pic_listx, ref_idx_lx, | |
1375 num_ref_idx_lX_active_minus1, | |
1376 dpb_.GetLongRefPicByLongTermPicNum(list_mod->long_term_pic_num)); | |
1377 | |
1378 ref_idx_lx++; | |
1379 | |
1380 for (int src = ref_idx_lx, dst = ref_idx_lx; | |
1381 src <= num_ref_idx_lX_active_minus1 + 1; ++src) { | |
1382 if (LongTermPicNumF((*ref_pic_listx)[src]) | |
1383 != static_cast<int>(list_mod->long_term_pic_num)) | |
1384 (*ref_pic_listx)[dst++] = (*ref_pic_listx)[src]; | |
1385 } | |
1386 break; | |
1387 | |
1388 case 3: | |
1389 // End of modification list. | |
1390 return true; | |
1391 | |
1392 default: | |
1393 // May be recoverable. | |
1394 DVLOG(1) << "Invalid modification_of_pic_nums_idc=" | |
1395 << list_mod->modification_of_pic_nums_idc | |
1396 << " in position " << i; | |
1397 break; | |
1398 } | |
1399 | |
1400 ++list_mod; | |
1401 } | |
1402 | |
1403 return true; | |
1404 } | |
1405 | |
1406 bool VaapiH264Decoder::PutPicToTexture(int32 picture_buffer_id) { | |
1407 DecodeSurfaces::iterator it = decode_surfaces_.find(picture_buffer_id); | |
1408 if (it == decode_surfaces_.end()) { | |
1409 DVLOG(1) << "Asked to put an invalid buffer"; | |
1410 return false; | |
1411 } | |
1412 | |
1413 DVLOG(3) << "Will output from VASurface " << it->second->va_surface_id() | |
1414 << " to texture id " << it->second->texture_id(); | |
1415 | |
1416 if (!it->second->Sync()) | |
Ami GONE FROM CHROMIUM
2012/04/09 21:35:53
if (!foo)
return false;
return true;
is equiva
Pawel Osciak
2012/05/03 16:22:07
Of course.
| |
1417 return false; | |
1418 | |
1419 return true; | |
1420 } | |
1421 | |
1422 bool VaapiH264Decoder::OutputPic(H264Picture* pic) { | |
1423 // The decoder is finished with this POC, so its POC->surface mapping is | |
1424 // no longer needed. When the client returns this surface via | |
1425 // ReusePictureBuffer(), it will be marked as available for use again. | |
1426 DecodeSurface* dec_surface = UnassignSurfaceFromPoC(pic->pic_order_cnt); | |
1427 if (!dec_surface) | |
1428 return false; | |
1429 | |
1430 // Notify the client that a picture can be output. The decoded picture may | |
1431 // not be synced with texture contents yet at this point. The client has | |
1432 // to use PutPicToTexture() to ensure that. | |
1433 DVLOG(4) << "Posting output task for input_id: " << dec_surface->input_id() | |
1434 << "output_id: " << dec_surface->picture_buffer_id(); | |
1435 output_pic_callback_.Run(dec_surface->input_id(), | |
1436 dec_surface->picture_buffer_id()); | |
1437 return true; | |
1438 } | |
1439 | |
1440 bool VaapiH264Decoder::Flush() { | |
1441 // Output all pictures that are waiting to be outputted. | |
1442 H264Picture::PtrVector to_output; | |
1443 dpb_.GetNotOutputtedPicsAppending(to_output); | |
1444 // Sort them by ascending POC to output in order. | |
1445 std::sort(to_output.begin(), to_output.end(), POCAscCompare()); | |
1446 | |
1447 H264Picture::PtrVector::iterator it; | |
1448 for (it = to_output.begin(); it != to_output.end(); ++it) { | |
1449 if (!OutputPic(*it)) { | |
1450 DVLOG(1) << "Failed to output pic POC: " << (*it)->pic_order_cnt; | |
1451 return false; | |
1452 } | |
1453 } | |
1454 | |
1455 // And clear DPB contents. | |
1456 dpb_.Clear(); | |
1457 | |
1458 return true; | |
1459 } | |
1460 | |
1461 bool VaapiH264Decoder::StartNewFrame(H264SliceHeader* slice_hdr) { | |
1462 // TODO(posciak): add handling of max_num_ref_frames per spec. | |
1463 | |
1464 // If the new frame is an IDR, output what's left to output and clear the DPB. | |
1465 if (slice_hdr->idr_pic_flag) { | |
1466 // Output the remaining pictures, unless explicitly instructed not to. | |
1467 if (!slice_hdr->no_output_of_prior_pics_flag) { | |
1468 // Output DPB contents. | |
1469 if (!Flush()) | |
1470 return false; | |
1471 } | |
1472 dpb_.Clear(); | |
1473 } | |
1474 | |
1475 // curr_pic_ should have either been added to DPB or discarded when finishing | |
1476 // the last frame. DPB is responsible for releasing that memory once it's | |
1477 // not needed anymore. | |
1478 DCHECK(!curr_pic_.get()); | |
1479 curr_pic_.reset(new H264Picture); | |
1480 CHECK(curr_pic_.get()); | |
1481 | |
1482 if (!InitCurrPicture(slice_hdr)) | |
1483 return false; | |
1484 | |
1485 DCHECK_GT(max_frame_num_, 0); | |
1486 | |
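// Recalculate PicNum/LongTermPicNum for all reference pictures in the DPB | |
// relative to the new frame_num. | |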
1487 UpdatePicNums(); | |
1488 | |
1489 // Prepare reference picture lists if required (P, SP and B slices). | |
1490 ref_pic_list0_.clear(); | |
1491 ref_pic_list1_.clear(); | |
1492 if (IsH264PSlice(slice_hdr) || IsH264SPSlice(slice_hdr)) { | |
1493 ConstructReferencePicListsP(slice_hdr); | |
1494 ModifyReferencePicList(slice_hdr, 0); | |
1495 } else if (IsH264BSlice(slice_hdr)) { | |
1496 ConstructReferencePicListsB(slice_hdr); | |
1497 ModifyReferencePicList(slice_hdr, 0); | |
1498 ModifyReferencePicList(slice_hdr, 1); | |
1499 } | |
1500 | |
1501 // Send parameter buffers before each new picture, before the first slice. | |
1502 if (!SendPPS()) | |
1503 return false; | |
1504 | |
1505 if (!SendIQMatrix()) | |
1506 return false; | |
1507 | |
1508 if (!QueueSlice(slice_hdr)) | |
1509 return false; | |
1510 | |
1511 return true; | |
1512 } | |
1513 | |
1514 bool VaapiH264Decoder::HandleMemoryManagementOps() { | |
1515 // Spec 8.2.5.4: adaptive memory control decoded reference picture marking. | |
1516 for (unsigned int i = 0; i < arraysize(curr_pic_->ref_pic_marking); ++i) { | |
1517 // Code below does not support interlaced stream (per-field pictures). | |
1518 content::H264DecRefPicMarking* ref_pic_marking = | |
1519 &curr_pic_->ref_pic_marking[i]; | |
1520 H264Picture* to_mark; | |
1521 int pic_num_x; | |
1522 | |
1523 switch (ref_pic_marking->memory_mgmnt_control_operation) { | |
1524 case 0: | |
1525 // Normal end of operations' specification. | |
1526 return true; | |
1527 | |
1528 case 1: | |
1529 // Mark a short term reference picture as unused so it can be removed | |
1530 // if outputted. | |
1531 pic_num_x = curr_pic_->pic_num - | |
1532 (ref_pic_marking->difference_of_pic_nums_minus1 + 1); | |
1533 to_mark = dpb_.GetShortRefPicByPicNum(pic_num_x); | |
1534 if (to_mark) { | |
1535 to_mark->ref = false; | |
1536 } else { | |
1537 DVLOG(1) << "Invalid short ref pic num to unmark"; | |
1538 return false; | |
1539 } | |
1540 break; | |
1541 | |
1542 case 2: | |
1543 // Mark a long term reference picture as unused so it can be removed | |
1544 // if outputted. | |
1545 to_mark = dpb_.GetLongRefPicByLongTermPicNum( | |
1546 ref_pic_marking->long_term_pic_num); | |
1547 if (to_mark) { | |
1548 to_mark->ref = false; | |
1549 } else { | |
1550 DVLOG(1) << "Invalid long term ref pic num to unmark"; | |
1551 return false; | |
1552 } | |
1553 break; | |
1554 | |
1555 case 3: | |
1556 // Mark a short term reference picture as long term reference. | |
1557 pic_num_x = curr_pic_->pic_num - | |
1558 (ref_pic_marking->difference_of_pic_nums_minus1 + 1); | |
1559 to_mark = dpb_.GetShortRefPicByPicNum(pic_num_x); | |
1560 if (to_mark) { | |
1561 DCHECK(to_mark->ref && !to_mark->long_term); | |
1562 to_mark->long_term = true; | |
1563 to_mark->long_term_frame_idx = ref_pic_marking->long_term_frame_idx; | |
1564 } else { | |
1565 DVLOG(1) << "Invalid short term ref pic num to mark as long ref"; | |
1566 return false; | |
1567 } | |
1568 break; | |
1569 | |
1570 case 4: { | |
1571 // Unmark all reference pictures with long_term_frame_idx over new max. | |
1572 max_long_term_frame_idx_ = | |
1573 ref_pic_marking->max_long_term_frame_idx_plus1 - 1; | |
1574 H264Picture::PtrVector long_terms; | |
1575 dpb_.GetLongTermRefPicsAppending(long_terms); | |
1576 for (size_t i = 0; i < long_terms.size(); ++i) { | |
1577 H264Picture* pic = long_terms[i]; | |
1578 DCHECK(pic->ref && pic->long_term); | |
1579 // Ok to cast, max_long_term_frame_idx is much smaller than 16bit. | |
1580 if (pic->long_term_frame_idx > | |
1581 static_cast<int>(max_long_term_frame_idx_)) | |
1582 pic->ref = false; | |
1583 } | |
1584 break; | |
1585 } | |
1586 | |
1587 case 5: | |
1588 // Unmark all reference pictures. | |
1589 dpb_.MarkAllUnusedForRef(); | |
1590 max_long_term_frame_idx_ = -1; | |
1591 curr_pic_->mem_mgmt_5 = true; | |
1592 break; | |
1593 | |
1594 case 6: { | |
1595 // Replace long term reference pictures with current picture. | |
1596 // First unmark if any existing with this long_term_frame_idx... | |
1597 H264Picture::PtrVector long_terms; | |
1598 dpb_.GetLongTermRefPicsAppending(long_terms); | |
1599 for (size_t i = 0; i < long_terms.size(); ++i) { | |
1600 H264Picture* pic = long_terms[i]; | |
1601 DCHECK(pic->ref && pic->long_term); | |
1602 // Ok to cast, long_term_frame_idx is much smaller than 16bit. | |
1603 if (pic->long_term_frame_idx == | |
1604 static_cast<int>(ref_pic_marking->long_term_frame_idx)) | |
1605 pic->ref = false; | |
1606 } | |
1607 | |
1608 // and mark the current one instead. | |
1609 curr_pic_->ref = true; | |
1610 curr_pic_->long_term = true; | |
1611 curr_pic_->long_term_frame_idx = ref_pic_marking->long_term_frame_idx; | |
1612 break; | |
1613 } | |
1614 | |
1615 default: | |
1616 // Would indicate a bug in parser. | |
1617 NOTREACHED(); | |
1618 } | |
1619 } | |
1620 | |
1621 return true; | |
1622 } | |
1623 | |
1624 // This method ensures that the DPB does not overflow, either by removing | |
1625 // reference pictures as specified in the stream, or by using a sliding | |
1626 // window procedure to remove the oldest one. | |
1627 // It also marks and unmarks pictures as used for reference. | |
1628 // See spec 8.2.5.1. | |
1629 void VaapiH264Decoder::ReferencePictureMarking() { | |
1630 if (curr_pic_->idr) { | |
1631 // If current picture is an IDR, all reference pictures are unmarked. | |
1632 dpb_.MarkAllUnusedForRef(); | |
1633 | |
1634 if (curr_pic_->long_term_reference_flag) { | |
1635 curr_pic_->long_term = true; | |
1636 curr_pic_->long_term_frame_idx = 0; | |
1637 max_long_term_frame_idx_ = 0; | |
1638 } else { | |
1639 curr_pic_->long_term = false; | |
1640 max_long_term_frame_idx_ = -1; | |
1641 } | |
1642 } else { | |
1643 if (!curr_pic_->adaptive_ref_pic_marking_mode_flag) { | |
1644 // If non-IDR, and the stream does not indicate what we should do to | |
1645 // ensure DPB doesn't overflow, discard oldest picture. | |
1646 // See spec 8.2.5.3. | |
1647 if (curr_pic_->field == H264Picture::FIELD_NONE) { | |
1648 DCHECK_LE(dpb_.CountRefPics(), | |
1649 std::max<int>(parser_.GetSPS(curr_sps_id_)->max_num_ref_frames, | |
1650 1)); | |
1651 if (dpb_.CountRefPics() == | |
1652 std::max<int>(parser_.GetSPS(curr_sps_id_)->max_num_ref_frames, | |
1653 1)) { | |
1654 // Max number of reference pics reached, | |
1655 // need to remove one of the short term ones. | |
1656 // Find smallest frame_num_wrap short reference picture and mark | |
1657 // it as unused. | |
1658 H264Picture* to_unmark = dpb_.GetLowestFrameNumWrapShortRefPic(); | |
1659 if (to_unmark == NULL) { | |
1660 DVLOG(1) << "Couldn't find a short ref picture to unmark"; | |
1661 return; | |
1662 } | |
1663 to_unmark->ref = false; | |
1664 } | |
1665 } else { | |
1666 // Shouldn't get here. | |
1667 DVLOG(1) << "Interlaced video not supported."; | |
1668 } | |
1669 } else { | |
1670 // Stream has instructions how to discard pictures from DPB and how | |
1671 // to mark/unmark existing reference pictures. Do it. | |
1672 // Spec 8.2.5.4. | |
1673 if (curr_pic_->field == H264Picture::FIELD_NONE) { | |
1674 HandleMemoryManagementOps(); | |
1675 } else { | |
1676 // Shouldn't get here. | |
1677 DVLOG(1) << "Interlaced video not supported."; | |
1678 } | |
1679 } | |
1680 } | |
1681 } | |
1682 | |
1683 bool VaapiH264Decoder::FinishPicture() { | |
1684 DCHECK(curr_pic_.get()); | |
1685 | |
1686 // Finish processing previous picture. | |
1687 // Start by storing previous reference picture data for later use, | |
1688 // if picture being finished is a reference picture. | |
1689 if (curr_pic_->ref) { | |
1690 ReferencePictureMarking(); | |
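// These values feed the picture order count derivation for subsequent | |
// pictures (spec 8.2.1). | |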
1691 prev_ref_has_memmgmnt5_ = curr_pic_->mem_mgmt_5; | |
1692 prev_ref_top_field_order_cnt_ = curr_pic_->top_field_order_cnt; | |
1693 prev_ref_pic_order_cnt_msb_ = curr_pic_->pic_order_cnt_msb; | |
1694 prev_ref_pic_order_cnt_lsb_ = curr_pic_->pic_order_cnt_lsb; | |
1695 prev_ref_field_ = curr_pic_->field; | |
1696 } | |
1697 | |
1698 // Remove unused (for reference or later output) pictures from DPB. | |
1699 dpb_.RemoveUnused(); | |
1700 | |
1701 DVLOG(4) << "Finishing picture, DPB entries: " << dpb_.size() | |
1702 << " Num available dec surfaces: " | |
1703 << num_available_decode_surfaces_; | |
1704 | |
1705 if (dpb_.IsFull()) { | |
1706 // DPB is full, we have to make space for the new picture. | |
1707 // Get all pictures that haven't been outputted yet. | |
1708 H264Picture::PtrVector not_outputted; | |
1709 dpb_.GetNotOutputtedPicsAppending(not_outputted); | |
1710 std::sort(not_outputted.begin(), not_outputted.end(), POCAscCompare()); | |
1711 H264Picture::PtrVector::iterator output_candidate = not_outputted.begin(); | |
1712 | |
1713 // Keep outputting pictures until we can either output the picture being | |
1714 // finished and discard it (if it is not a reference picture), or until | |
1715 // we can discard an older picture that was just waiting for output and | |
1716 // is not a reference picture, thus making space for the current one. | |
1717 while (dpb_.IsFull()) { | |
1718 // We may have output enough pictures to be able to output the current one. | |
1719 if (!curr_pic_->ref && (output_candidate == not_outputted.end() || | |
1720 curr_pic_->pic_order_cnt < (*output_candidate)->pic_order_cnt)) { | |
1721 // curr_pic_ is not a reference picture and no preceding pictures are | |
1722 // waiting for output in DPB, so it can be outputted and discarded | |
1723 // without storing in DPB. | |
1724 if (!OutputPic(curr_pic_.get())) | |
1725 return false; | |
1726 goto no_store; | |
1727 } | |
1728 | |
1729 // Couldn't output the current picture, so try to output the lowest-POC | |
1730 // picture from the DPB. | |
1731 if (output_candidate != not_outputted.end()) { | |
1732 if (!OutputPic(*output_candidate)) | |
1733 return false; | |
1734 | |
1735 // If outputted picture wasn't a reference picture, it can be removed. | |
1736 if (!(*output_candidate)->ref) | |
1737 dpb_.RemoveByPOC((*output_candidate)->pic_order_cnt); | |
1738 } else { | |
1739 // Couldn't output current pic and couldn't do anything | |
1740 // with existing pictures in DPB, so we can't make space. | |
1741 // This should not happen. | |
1742 DVLOG(1) << "Could not free up space in DPB!"; | |
1743 return false; | |
1744 } | |
1745 ++output_candidate; | |
1746 } | |
1747 } | |
1748 | |
1749 // Store current picture for later output and/or reference (ownership now | |
1750 // with the DPB). | |
1751 dpb_.StorePic(curr_pic_.release()); | |
1752 | |
1753 no_store: | |
1754 return true; | |
1755 } | |
1756 | |
1757 bool VaapiH264Decoder::ProcessSPS(int sps_id) { | |
1758 const H264SPS* sps = parser_.GetSPS(sps_id); | |
1759 DCHECK(sps); | |
1760 | |
1761 if (sps->frame_mbs_only_flag == 0) { | |
1762 // Fields/interlaced video not supported. | |
1763 DVLOG(1) << "frame_mbs_only_flag != 1 not supported"; | |
1764 return false; | |
1765 } | |
1766 | |
1767 if (sps->gaps_in_frame_num_value_allowed_flag) { | |
1768 DVLOG(1) << "Gaps in frame numbers not supported"; | |
1769 return false; | |
1770 } | |
1771 | |
1772 if (sps->pic_order_cnt_type != 0) { | |
1773 DVLOG(1) << "Unsupported pic_order_cnt_type"; | |
1774 return false; | |
1775 } | |
1776 | |
1777 curr_sps_id_ = sps->seq_parameter_set_id; | |
1778 | |
1779 // Calculate picture height/width (spec 7.4.2.1.1, 7.4.3). | |
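// Each macroblock is 16x16 pixels; for streams that are not frame-only, a | |
// map unit spans two macroblock rows, hence the (2 - frame_mbs_only_flag) | |
// factor in the height calculation. | |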
1780 int width = 16 * (sps->pic_width_in_mbs_minus1 + 1); | |
1781 int height = 16 * (2 - sps->frame_mbs_only_flag) * | |
1782 (sps->pic_height_in_map_units_minus1 + 1); | |
1783 | |
1784 if ((pic_width_ != -1 || pic_height_ != -1) && | |
1785 (width != pic_width_ || height != pic_height_)) { | |
1786 DVLOG(1) << "Picture size changed mid-stream"; | |
1787 return false; | |
1788 } | |
1789 | |
1790 pic_width_ = width; | |
1791 pic_height_ = height; | |
1792 DVLOG(1) << "New picture size: " << pic_width_ << "x" << pic_height_; | |
1793 | |
1794 max_pic_order_cnt_lsb_ = 1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4); | |
1795 max_frame_num_ = 1 << (sps->log2_max_frame_num_minus4 + 4); | |
1796 | |
1797 return true; | |
1798 } | |
1799 | |
1800 bool VaapiH264Decoder::ProcessPPS(int pps_id) { | |
1801 const H264PPS* pps = parser_.GetPPS(pps_id); | |
1802 DCHECK(pps); | |
1803 | |
1804 curr_pps_id_ = pps->pic_parameter_set_id; | |
1805 | |
1806 return true; | |
1807 } | |
1808 | |
1809 bool VaapiH264Decoder::FinishPrevFrameIfPresent() { | |
1810 // If we already have a frame waiting to be decoded, decode it and finish. | |
1811 if (curr_pic_ != NULL) { | |
1812 if (!DecodePicture()) | |
1813 return false; | |
1814 return FinishPicture(); | |
1815 } | |
1816 | |
1817 return true; | |
1818 } | |
1819 | |
1820 bool VaapiH264Decoder::ProcessSlice(H264SliceHeader* slice_hdr) { | |
1821 prev_frame_num_ = frame_num_; | |
1822 frame_num_ = slice_hdr->frame_num; | |
1823 | |
1824 if (prev_frame_num_ > 0 && prev_frame_num_ < frame_num_ - 1) { | |
1825 DVLOG(1) << "Gap in frame_num!"; | |
1826 return false; | |
1827 } | |
1828 | |
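// MaxPicNum is MaxFrameNum for frame coding and twice that for field coding. | |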
1829 if (slice_hdr->field_pic_flag == 0) | |
1830 max_pic_num_ = max_frame_num_; | |
1831 else | |
1832 max_pic_num_ = 2 * max_frame_num_; | |
1833 | |
1834 // TODO(posciak): switch to new picture detection per 7.4.1.2.4. | |
1835 if (curr_pic_ != NULL && slice_hdr->first_mb_in_slice != 0) { | |
1836 // This is just some more slice data of the current picture, so | |
1837 // just queue it and return. | |
1838 QueueSlice(slice_hdr); | |
1839 return true; | |
1840 } else { | |
1841 // A new frame, so first finish the previous one before processing it... | |
1842 if (!FinishPrevFrameIfPresent()) | |
1843 return false; | |
1844 | |
1845 // and then start a new one. | |
1846 return StartNewFrame(slice_hdr); | |
1847 } | |
1848 } | |
1849 | |
1850 #define SET_ERROR_AND_RETURN() \ | |
1851 do { \ | |
1852 DVLOG(1) << "Error during decode"; \ | |
1853 state_ = kError; \ | |
1854 return VaapiH264Decoder::kDecodeError; \ | |
1855 } while (0) | |
1856 | |
1857 VaapiH264Decoder::DecResult VaapiH264Decoder::DecodeInitial(int32 input_id) { | |
1858 // Decode enough of the stream to get the required picture size (i.e. until | |
1859 // we find an SPS); if we see slice data first, the beginning of the stream is missing. | |
1860 H264NALU nalu; | |
1861 H264Parser::Result res; | |
1862 | |
1863 DCHECK_NE(state_, kUninitialized); | |
1864 | |
1865 curr_input_id_ = input_id; | |
1866 | |
1867 while (1) { | |
1868 // Get next NALU looking for SPS or IDR if after reset. | |
1869 res = parser_.AdvanceToNextNALU(&nalu); | |
1870 if (res == H264Parser::kEOStream) { | |
1871 DVLOG(1) << "Could not find SPS before EOS"; | |
1872 return kNeedMoreStreamData; | |
1873 } else if (res != H264Parser::kOk) { | |
1874 SET_ERROR_AND_RETURN(); | |
1875 } | |
1876 | |
1877 DVLOG(4) << " NALU found: " << static_cast<int>(nalu.nal_unit_type); | |
1878 | |
1879 switch (nalu.nal_unit_type) { | |
1880 case H264NALU::kH264NaluSPS: | |
1881 res = parser_.ParseSPS(&curr_sps_id_); | |
1882 if (res != H264Parser::kOk) | |
1883 SET_ERROR_AND_RETURN(); | |
1884 | |
1885 if (!ProcessSPS(curr_sps_id_)) | |
1886 SET_ERROR_AND_RETURN(); | |
1887 | |
1888 // Just got information about the video size from SPS, so we can | |
1889 // now allocate surfaces and let the client know we are ready to | |
1890 // accept output buffers and decode. | |
1891 if (!CreateVASurfaces()) | |
1892 SET_ERROR_AND_RETURN(); | |
1893 | |
1894 state_ = kDecoding; | |
1895 return kReadyToDecode; | |
1896 | |
1897 case H264NALU::kH264NaluIDRSlice: | |
1898 // If after reset, should be able to recover from an IDR. | |
1899 if (state_ == kAfterReset) { | |
1900 H264SliceHeader slice_hdr; | |
1901 | |
1902 res = parser_.ParseSliceHeader(&slice_hdr, &nalu); | |
1903 if (res != H264Parser::kOk) | |
1904 SET_ERROR_AND_RETURN(); | |
1905 | |
1906 if (!ProcessSlice(&slice_hdr)) | |
1907 SET_ERROR_AND_RETURN(); | |
1908 | |
1909 state_ = kDecoding; | |
1910 return kReadyToDecode; | |
1911 } // else fallthrough | |
1912 case H264NALU::kH264NaluNonIDRSlice: | |
1913 case H264NALU::kH264NaluPPS: | |
1914 // Non-IDR slices cannot be used as resume points, as we may not | |
1915 // have all reference pictures that they may require. | |
1916 // fallthrough | |
1917 default: | |
1918 // Skip everything unless it's PPS or an IDR slice (if after reset). | |
1919 DVLOG(4) << "Skipping NALU"; | |
1920 break; | |
1921 } | |
1922 } | |
1923 } | |
1924 | |
1925 void VaapiH264Decoder::SetStream(uint8* ptr, size_t size) { | |
1926 DCHECK(ptr); | |
1927 DCHECK(size); | |
1928 | |
1929 // Got new input stream data from the client. | |
1930 DVLOG(4) << "New input stream chunk at " << static_cast<void*>(ptr) | |
1931 << " size: " << size; | |
1932 parser_.SetStream(ptr, size); | |
1933 } | |
1934 | |
1935 VaapiH264Decoder::DecResult VaapiH264Decoder::DecodeOneFrame(int32 input_id) { | |
1936 // Decode until one full frame is decoded and can be returned, or until the | |
1937 // end of the input data is reached. | |
1938 H264Parser::Result par_res; | |
1939 H264NALU nalu; | |
1940 | |
1941 curr_input_id_ = input_id; | |
1942 | |
1943 if (state_ != kDecoding) { | |
1944 DVLOG(1) << "Decoder not ready: error in stream or not initialized"; | |
1945 return kDecodeError; | |
1946 } else if (num_available_decode_surfaces_ < 1) { | |
1947 DVLOG(4) << "No output surfaces available"; | |
1948 return kNoOutputAvailable; | |
1949 } | |
1950 | |
1951 // All of the actions below might result in decoding a picture from | |
1952 // previously parsed data, but we still have to handle/parse current input | |
1953 // first. | |
1954 // Note: this may drop some already decoded frames if there are errors | |
1955 // further in the stream, but we are OK with that. | |
1956 while (1) { | |
1957 par_res = parser_.AdvanceToNextNALU(&nalu); | |
1958 if (par_res == H264Parser::kEOStream) | |
1959 return kNeedMoreStreamData; | |
1960 else if (par_res != H264Parser::kOk) | |
1961 SET_ERROR_AND_RETURN(); | |
1962 | |
1963 DVLOG(4) << "NALU found: " << static_cast<int>(nalu.nal_unit_type); | |
1964 | |
1965 switch (nalu.nal_unit_type) { | |
1966 case H264NALU::kH264NaluNonIDRSlice: | |
1967 case H264NALU::kH264NaluIDRSlice: { | |
1968 H264SliceHeader slice_hdr; | |
1969 | |
1970 par_res = parser_.ParseSliceHeader(&slice_hdr, &nalu); | |
1971 if (par_res != H264Parser::kOk) | |
1972 SET_ERROR_AND_RETURN(); | |
1973 | |
1974 if (!ProcessSlice(&slice_hdr)) | |
1975 SET_ERROR_AND_RETURN(); | |
1976 break; | |
1977 } | |
1978 | |
1979 case H264NALU::kH264NaluSPS: | |
1980 int sps_id; | |
1981 | |
1982 if (!FinishPrevFrameIfPresent()) | |
1983 SET_ERROR_AND_RETURN(); | |
1984 | |
1985 par_res = parser_.ParseSPS(&sps_id); | |
1986 if (par_res != H264Parser::kOk) | |
1987 SET_ERROR_AND_RETURN(); | |
1988 | |
1989 if (!ProcessSPS(sps_id)) | |
1990 SET_ERROR_AND_RETURN(); | |
1991 break; | |
1992 | |
1993 case H264NALU::kH264NaluPPS: | |
1994 int pps_id; | |
1995 | |
1996 if (!FinishPrevFrameIfPresent()) | |
1997 SET_ERROR_AND_RETURN(); | |
1998 | |
1999 par_res = parser_.ParsePPS(&pps_id); | |
2000 if (par_res != H264Parser::kOk) | |
2001 SET_ERROR_AND_RETURN(); | |
2002 | |
2003 if (!ProcessPPS(pps_id)) | |
2004 SET_ERROR_AND_RETURN(); | |
2005 break; | |
2006 | |
2007 default: | |
2008 // skip NALU | |
2009 break; | |
2010 } | |
2011 | |
2012 // If the last action resulted in decoding a frame, possibly from older | |
2013 // data, return. Otherwise keep reading the stream. | |
2014 if (frame_ready_at_hw_) { | |
2015 frame_ready_at_hw_ = false; | |
2016 return kDecodedFrame; | |
2017 } | |
2018 } | |
2019 } | |
2020 | |
2021 // static | |
2022 size_t VaapiH264Decoder::GetRequiredNumOfPictures() { | |
2023 return kNumReqPictures; | |
2024 } | |
2025 | |
2026 } // namespace content | |
2027 | |