OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
| 5 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h" |
| 6 |
5 #include "base/bind.h" | 7 #include "base/bind.h" |
6 #include "base/logging.h" | 8 #include "base/logging.h" |
7 #include "base/metrics/histogram.h" | 9 #include "base/metrics/histogram.h" |
8 #include "base/stl_util.h" | 10 #include "base/stl_util.h" |
9 #include "base/strings/string_util.h" | 11 #include "base/strings/string_util.h" |
10 #include "base/synchronization/waitable_event.h" | 12 #include "base/synchronization/waitable_event.h" |
11 #include "base/trace_event/trace_event.h" | 13 #include "base/trace_event/trace_event.h" |
12 #include "content/common/gpu/gpu_channel.h" | 14 #include "content/common/gpu/gpu_channel.h" |
| 15 #include "content/common/gpu/media/accelerated_video_decoder.h" |
| 16 #include "content/common/gpu/media/h264_decoder.h" |
13 #include "content/common/gpu/media/vaapi_picture.h" | 17 #include "content/common/gpu/media/vaapi_picture.h" |
14 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h" | |
15 #include "media/base/bind_to_current_loop.h" | 18 #include "media/base/bind_to_current_loop.h" |
16 #include "media/video/picture.h" | 19 #include "media/video/picture.h" |
17 #include "ui/gl/gl_bindings.h" | 20 #include "ui/gl/gl_bindings.h" |
18 #include "ui/gl/gl_image.h" | 21 #include "ui/gl/gl_image.h" |
19 | 22 |
20 static void ReportToUMA( | 23 namespace content { |
21 content::VaapiH264Decoder::VAVDAH264DecoderFailure failure) { | 24 |
22 UMA_HISTOGRAM_ENUMERATION( | 25 namespace { |
23 "Media.VAVDAH264.DecoderFailure", | 26 // UMA errors that the VaapiVideoDecodeAccelerator class reports. |
24 failure, | 27 enum VAVDADecoderFailure { |
25 content::VaapiH264Decoder::VAVDA_H264_DECODER_FAILURES_MAX); | 28 VAAPI_ERROR = 0, |
| 29 VAVDA_DECODER_FAILURES_MAX, |
| 30 }; |
26 } | 31 } |
27 | 32 |
28 namespace content { | 33 static void ReportToUMA(VAVDADecoderFailure failure) { |
| 34 UMA_HISTOGRAM_ENUMERATION("Media.VAVDA.DecoderFailure", failure, |
| 35 VAVDA_DECODER_FAILURES_MAX); |
| 36 } |
29 | 37 |
30 #define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \ | 38 #define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \ |
31 do { \ | 39 do { \ |
32 if (!(result)) { \ | 40 if (!(result)) { \ |
33 LOG(ERROR) << log; \ | 41 LOG(ERROR) << log; \ |
34 NotifyError(error_code); \ | 42 NotifyError(error_code); \ |
35 return ret; \ | 43 return ret; \ |
36 } \ | 44 } \ |
37 } while (0) | 45 } while (0) |
38 | 46 |
| 47 class VaapiVideoDecodeAccelerator::VaapiDecodeSurface |
| 48 : public base::RefCountedThreadSafe<VaapiDecodeSurface> { |
| 49 public: |
| 50 VaapiDecodeSurface(int32 bitstream_id, |
| 51 const scoped_refptr<VASurface>& va_surface); |
| 52 |
| 53 int32 bitstream_id() const { return bitstream_id_; } |
| 54 scoped_refptr<VASurface> va_surface() { return va_surface_; } |
| 55 |
| 56 private: |
| 57 friend class base::RefCountedThreadSafe<VaapiDecodeSurface>; |
| 58 ~VaapiDecodeSurface(); |
| 59 |
| 60 int32 bitstream_id_; |
| 61 scoped_refptr<VASurface> va_surface_; |
| 62 }; |
| 63 |
| 64 VaapiVideoDecodeAccelerator::VaapiDecodeSurface::VaapiDecodeSurface( |
| 65 int32 bitstream_id, |
| 66 const scoped_refptr<VASurface>& va_surface) |
| 67 : bitstream_id_(bitstream_id), va_surface_(va_surface) { |
| 68 } |
| 69 |
| 70 VaapiVideoDecodeAccelerator::VaapiDecodeSurface::~VaapiDecodeSurface() { |
| 71 } |
| 72 |
| 73 class VaapiH264Picture : public H264Picture { |
| 74 public: |
| 75 VaapiH264Picture(const scoped_refptr< |
| 76 VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface); |
| 77 |
| 78 VaapiH264Picture* AsVaapiH264Picture() override { return this; } |
| 79 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface() { |
| 80 return dec_surface_; |
| 81 } |
| 82 |
| 83 private: |
| 84 ~VaapiH264Picture() override; |
| 85 |
| 86 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface_; |
| 87 |
| 88 DISALLOW_COPY_AND_ASSIGN(VaapiH264Picture); |
| 89 }; |
| 90 |
| 91 VaapiH264Picture::VaapiH264Picture(const scoped_refptr< |
| 92 VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface) |
| 93 : dec_surface_(dec_surface) { |
| 94 } |
| 95 |
| 96 VaapiH264Picture::~VaapiH264Picture() { |
| 97 } |
| 98 |
| 99 class VaapiVideoDecodeAccelerator::VaapiH264Accelerator |
| 100 : public H264Decoder::H264Accelerator { |
| 101 public: |
| 102 VaapiH264Accelerator(VaapiVideoDecodeAccelerator* vaapi_dec, |
| 103 VaapiWrapper* vaapi_wrapper); |
| 104 ~VaapiH264Accelerator() override; |
| 105 |
| 106 // H264Decoder::H264Accelerator implementation. |
| 107 scoped_refptr<H264Picture> CreateH264Picture() override; |
| 108 |
| 109 bool SubmitFrameMetadata(const media::H264SPS* sps, |
| 110 const media::H264PPS* pps, |
| 111 const H264DPB& dpb, |
| 112 const H264Picture::Vector& ref_pic_listp0, |
| 113 const H264Picture::Vector& ref_pic_listb0, |
| 114 const H264Picture::Vector& ref_pic_listb1, |
| 115 const scoped_refptr<H264Picture>& pic) override; |
| 116 |
| 117 bool SubmitSlice(const media::H264PPS* pps, |
| 118 const media::H264SliceHeader* slice_hdr, |
| 119 const H264Picture::Vector& ref_pic_list0, |
| 120 const H264Picture::Vector& ref_pic_list1, |
| 121 const scoped_refptr<H264Picture>& pic, |
| 122 const uint8_t* data, |
| 123 size_t size) override; |
| 124 |
| 125 bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override; |
| 126 bool OutputPicture(const scoped_refptr<H264Picture>& pic) override; |
| 127 |
| 128 void Reset() override; |
| 129 |
| 130 private: |
| 131 scoped_refptr<VaapiDecodeSurface> H264PictureToVaapiDecodeSurface( |
| 132 const scoped_refptr<H264Picture>& pic); |
| 133 |
| 134 void FillVAPicture(VAPictureH264* va_pic, scoped_refptr<H264Picture> pic); |
| 135 int FillVARefFramesFromDPB(const H264DPB& dpb, |
| 136 VAPictureH264* va_pics, |
| 137 int num_pics); |
| 138 |
| 139 VaapiWrapper* vaapi_wrapper_; |
| 140 VaapiVideoDecodeAccelerator* vaapi_dec_; |
| 141 |
| 142 DISALLOW_COPY_AND_ASSIGN(VaapiH264Accelerator); |
| 143 }; |
| 144 |
39 VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0), size(0) { | 145 VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0), size(0) { |
40 } | 146 } |
41 | 147 |
42 VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() { | 148 VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() { |
43 } | 149 } |
44 | 150 |
45 void VaapiVideoDecodeAccelerator::NotifyError(Error error) { | 151 void VaapiVideoDecodeAccelerator::NotifyError(Error error) { |
46 if (message_loop_ != base::MessageLoop::current()) { | 152 if (message_loop_ != base::MessageLoop::current()) { |
47 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); | 153 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); |
48 message_loop_->PostTask(FROM_HERE, base::Bind( | 154 message_loop_->PostTask(FROM_HERE, base::Bind( |
(...skipping 68 matching lines...) |
117 } | 223 } |
118 #elif defined(USE_OZONE) | 224 #elif defined(USE_OZONE) |
119 if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) { | 225 if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) { |
120 DVLOG(1) << "HW video decode acceleration not available without " | 226 DVLOG(1) << "HW video decode acceleration not available without " |
121 << "EGLGLES2."; | 227 << "EGLGLES2."; |
122 return false; | 228 return false; |
123 } | 229 } |
124 #endif // USE_X11 | 230 #endif // USE_X11 |
125 | 231 |
126 vaapi_wrapper_ = VaapiWrapper::CreateForVideoCodec( | 232 vaapi_wrapper_ = VaapiWrapper::CreateForVideoCodec( |
127 VaapiWrapper::kDecode, profile, | 233 VaapiWrapper::kDecode, profile, base::Bind(&ReportToUMA, VAAPI_ERROR)); |
128 base::Bind(&ReportToUMA, content::VaapiH264Decoder::VAAPI_ERROR)); | |
129 | 234 |
130 if (!vaapi_wrapper_.get()) { | 235 if (!vaapi_wrapper_.get()) { |
131 DVLOG(1) << "Failed initializing VAAPI for profile " << profile; | 236 DVLOG(1) << "Failed initializing VAAPI for profile " << profile; |
132 return false; | 237 return false; |
133 } | 238 } |
134 | 239 |
135 decoder_.reset( | 240 if (!(profile >= media::H264PROFILE_MIN && |
136 new VaapiH264Decoder( | 241 profile <= media::H264PROFILE_MAX)) { |
137 vaapi_wrapper_.get(), | 242 DLOG(ERROR) << "Unsupported profile " << profile; |
138 media::BindToCurrentLoop(base::Bind( | 243 return false; |
139 &VaapiVideoDecodeAccelerator::SurfaceReady, weak_this_)), | 244 } |
140 base::Bind(&ReportToUMA))); | 245 |
| 246 h264_accelerator_.reset(new VaapiH264Accelerator(this, vaapi_wrapper_.get())); |
| 247 decoder_.reset(new H264Decoder(h264_accelerator_.get())); |
141 | 248 |
142 CHECK(decoder_thread_.Start()); | 249 CHECK(decoder_thread_.Start()); |
143 decoder_thread_proxy_ = decoder_thread_.message_loop_proxy(); | 250 decoder_thread_proxy_ = decoder_thread_.message_loop_proxy(); |
144 | 251 |
145 state_ = kIdle; | 252 state_ = kIdle; |
146 return true; | 253 return true; |
147 } | 254 } |
148 | 255 |
149 void VaapiVideoDecodeAccelerator::SurfaceReady( | |
150 int32 input_id, | |
151 const scoped_refptr<VASurface>& va_surface) { | |
152 DCHECK_EQ(message_loop_, base::MessageLoop::current()); | |
153 DCHECK(!awaiting_va_surfaces_recycle_); | |
154 | |
155 // Drop any requests to output if we are resetting or being destroyed. | |
156 if (state_ == kResetting || state_ == kDestroying) | |
157 return; | |
158 | |
159 pending_output_cbs_.push( | |
160 base::Bind(&VaapiVideoDecodeAccelerator::OutputPicture, | |
161 weak_this_, va_surface, input_id)); | |
162 | |
163 TryOutputSurface(); | |
164 } | |
165 | |
166 void VaapiVideoDecodeAccelerator::OutputPicture( | 256 void VaapiVideoDecodeAccelerator::OutputPicture( |
167 const scoped_refptr<VASurface>& va_surface, | 257 const scoped_refptr<VASurface>& va_surface, |
168 int32 input_id, | 258 int32 input_id, |
169 VaapiPicture* picture) { | 259 VaapiPicture* picture) { |
170 DCHECK_EQ(message_loop_, base::MessageLoop::current()); | 260 DCHECK_EQ(message_loop_, base::MessageLoop::current()); |
171 | 261 |
172 int32 output_id = picture->picture_buffer_id(); | 262 int32 output_id = picture->picture_buffer_id(); |
173 | 263 |
174 TRACE_EVENT2("Video Decoder", "VAVDA::OutputSurface", | 264 TRACE_EVENT2("Video Decoder", "VAVDA::OutputSurface", |
175 "input_id", input_id, | 265 "input_id", input_id, |
(...skipping 100 matching lines...) |
276 | 366 |
277 curr_input_buffer_ = input_buffers_.front(); | 367 curr_input_buffer_ = input_buffers_.front(); |
278 input_buffers_.pop(); | 368 input_buffers_.pop(); |
279 | 369 |
280 DVLOG(4) << "New current bitstream buffer, id: " | 370 DVLOG(4) << "New current bitstream buffer, id: " |
281 << curr_input_buffer_->id | 371 << curr_input_buffer_->id |
282 << " size: " << curr_input_buffer_->size; | 372 << " size: " << curr_input_buffer_->size; |
283 | 373 |
284 decoder_->SetStream( | 374 decoder_->SetStream( |
285 static_cast<uint8*>(curr_input_buffer_->shm->memory()), | 375 static_cast<uint8*>(curr_input_buffer_->shm->memory()), |
286 curr_input_buffer_->size, curr_input_buffer_->id); | 376 curr_input_buffer_->size); |
287 return true; | 377 return true; |
288 | 378 |
289 default: | 379 default: |
290 // We got woken up due to being destroyed/reset, ignore any already | 380 // We got woken up due to being destroyed/reset, ignore any already |
291 // queued inputs. | 381 // queued inputs. |
292 return false; | 382 return false; |
293 } | 383 } |
294 } | 384 } |
295 | 385 |
296 void VaapiVideoDecodeAccelerator::ReturnCurrInputBuffer_Locked() { | 386 void VaapiVideoDecodeAccelerator::ReturnCurrInputBuffer_Locked() { |
297 lock_.AssertAcquired(); | 387 lock_.AssertAcquired(); |
298 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); | 388 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); |
299 DCHECK(curr_input_buffer_.get()); | 389 DCHECK(curr_input_buffer_.get()); |
300 | 390 |
301 int32 id = curr_input_buffer_->id; | 391 int32 id = curr_input_buffer_->id; |
302 curr_input_buffer_.reset(); | 392 curr_input_buffer_.reset(); |
303 DVLOG(4) << "End of input buffer " << id; | 393 DVLOG(4) << "End of input buffer " << id; |
304 message_loop_->PostTask(FROM_HERE, base::Bind( | 394 message_loop_->PostTask(FROM_HERE, base::Bind( |
305 &Client::NotifyEndOfBitstreamBuffer, client_, id)); | 395 &Client::NotifyEndOfBitstreamBuffer, client_, id)); |
306 | 396 |
307 --num_stream_bufs_at_decoder_; | 397 --num_stream_bufs_at_decoder_; |
308 TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder", | 398 TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder", |
309 num_stream_bufs_at_decoder_); | 399 num_stream_bufs_at_decoder_); |
310 } | 400 } |
311 | 401 |
312 bool VaapiVideoDecodeAccelerator::FeedDecoderWithOutputSurfaces_Locked() { | 402 // TODO(posciak): refactor the whole class to remove sleeping in wait for |
| 403 // surfaces, and reschedule DecodeTask instead. |
| 404 bool VaapiVideoDecodeAccelerator::WaitForSurfaces_Locked() { |
313 lock_.AssertAcquired(); | 405 lock_.AssertAcquired(); |
314 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); | 406 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); |
315 | 407 |
316 while (available_va_surfaces_.empty() && | 408 while (available_va_surfaces_.empty() && |
317 (state_ == kDecoding || state_ == kFlushing || state_ == kIdle)) { | 409 (state_ == kDecoding || state_ == kFlushing || state_ == kIdle)) { |
318 surfaces_available_.Wait(); | 410 surfaces_available_.Wait(); |
319 } | 411 } |
320 | 412 |
321 if (state_ != kDecoding && state_ != kFlushing && state_ != kIdle) | 413 if (state_ != kDecoding && state_ != kFlushing && state_ != kIdle) |
322 return false; | 414 return false; |
323 | 415 |
324 DCHECK(!awaiting_va_surfaces_recycle_); | |
325 while (!available_va_surfaces_.empty()) { | |
326 scoped_refptr<VASurface> va_surface( | |
327 new VASurface(available_va_surfaces_.front(), requested_pic_size_, | |
328 va_surface_release_cb_)); | |
329 available_va_surfaces_.pop_front(); | |
330 decoder_->ReuseSurface(va_surface); | |
331 } | |
332 | |
333 return true; | 416 return true; |
334 } | 417 } |
335 | 418 |
336 void VaapiVideoDecodeAccelerator::DecodeTask() { | 419 void VaapiVideoDecodeAccelerator::DecodeTask() { |
337 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); | 420 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); |
338 TRACE_EVENT0("Video Decoder", "VAVDA::DecodeTask"); | 421 TRACE_EVENT0("Video Decoder", "VAVDA::DecodeTask"); |
339 base::AutoLock auto_lock(lock_); | 422 base::AutoLock auto_lock(lock_); |
340 | 423 |
341 if (state_ != kDecoding) | 424 if (state_ != kDecoding) |
342 return; | 425 return; |
343 | 426 |
344 // Main decode task. | 427 // Main decode task. |
345 DVLOG(4) << "Decode task"; | 428 DVLOG(4) << "Decode task"; |
346 | 429 |
347 // Try to decode what stream data is (still) in the decoder until we run out | 430 // Try to decode what stream data is (still) in the decoder until we run out |
348 // of it. | 431 // of it. |
349 while (GetInputBuffer_Locked()) { | 432 while (GetInputBuffer_Locked()) { |
350 DCHECK(curr_input_buffer_.get()); | 433 DCHECK(curr_input_buffer_.get()); |
351 | 434 |
352 VaapiH264Decoder::DecResult res; | 435 AcceleratedVideoDecoder::DecodeResult res; |
353 { | 436 { |
354 // We are OK releasing the lock here, as decoder never calls our methods | 437 // We are OK releasing the lock here, as decoder never calls our methods |
355 // directly and we will reacquire the lock before looking at state again. | 438 // directly and we will reacquire the lock before looking at state again. |
356 // This is the main decode function of the decoder and while keeping | 439 // This is the main decode function of the decoder and while keeping |
357 // the lock for its duration would be fine, it would defeat the purpose | 440 // the lock for its duration would be fine, it would defeat the purpose |
358 // of having a separate decoder thread. | 441 // of having a separate decoder thread. |
359 base::AutoUnlock auto_unlock(lock_); | 442 base::AutoUnlock auto_unlock(lock_); |
360 res = decoder_->Decode(); | 443 res = decoder_->Decode(); |
361 } | 444 } |
362 | 445 |
363 switch (res) { | 446 switch (res) { |
364 case VaapiH264Decoder::kAllocateNewSurfaces: | 447 case AcceleratedVideoDecoder::kAllocateNewSurfaces: |
365 DVLOG(1) << "Decoder requesting a new set of surfaces"; | 448 DVLOG(1) << "Decoder requesting a new set of surfaces"; |
366 message_loop_->PostTask(FROM_HERE, base::Bind( | 449 message_loop_->PostTask(FROM_HERE, base::Bind( |
367 &VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange, weak_this_, | 450 &VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange, weak_this_, |
368 decoder_->GetRequiredNumOfPictures(), | 451 decoder_->GetRequiredNumOfPictures(), |
369 decoder_->GetPicSize())); | 452 decoder_->GetPicSize())); |
370 // We'll get rescheduled once ProvidePictureBuffers() finishes. | 453 // We'll get rescheduled once ProvidePictureBuffers() finishes. |
371 return; | 454 return; |
372 | 455 |
373 case VaapiH264Decoder::kRanOutOfStreamData: | 456 case AcceleratedVideoDecoder::kRanOutOfStreamData: |
374 ReturnCurrInputBuffer_Locked(); | 457 ReturnCurrInputBuffer_Locked(); |
375 break; | 458 break; |
376 | 459 |
377 case VaapiH264Decoder::kRanOutOfSurfaces: | 460 case AcceleratedVideoDecoder::kRanOutOfSurfaces: |
378 // No more output buffers in the decoder, try getting more or go to | 461 // No more output buffers in the decoder, try getting more or go to |
379 // sleep waiting for them. | 462 // sleep waiting for them. |
380 if (!FeedDecoderWithOutputSurfaces_Locked()) | 463 if (!WaitForSurfaces_Locked()) |
381 return; | 464 return; |
382 | 465 |
383 break; | 466 break; |
384 | 467 |
385 case VaapiH264Decoder::kDecodeError: | 468 case AcceleratedVideoDecoder::kDecodeError: |
386 RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream", | 469 RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream", |
387 PLATFORM_FAILURE, ); | 470 PLATFORM_FAILURE, ); |
388 return; | 471 return; |
389 } | 472 } |
390 } | 473 } |
391 } | 474 } |
392 | 475 |
393 void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(size_t num_pics, | 476 void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(size_t num_pics, |
394 gfx::Size size) { | 477 gfx::Size size) { |
395 DCHECK_EQ(message_loop_, base::MessageLoop::current()); | 478 DCHECK_EQ(message_loop_, base::MessageLoop::current()); |
(...skipping 345 matching lines...) |
741 void VaapiVideoDecodeAccelerator::Destroy() { | 824 void VaapiVideoDecodeAccelerator::Destroy() { |
742 DCHECK_EQ(message_loop_, base::MessageLoop::current()); | 825 DCHECK_EQ(message_loop_, base::MessageLoop::current()); |
743 Cleanup(); | 826 Cleanup(); |
744 delete this; | 827 delete this; |
745 } | 828 } |
746 | 829 |
747 bool VaapiVideoDecodeAccelerator::CanDecodeOnIOThread() { | 830 bool VaapiVideoDecodeAccelerator::CanDecodeOnIOThread() { |
748 return false; | 831 return false; |
749 } | 832 } |
750 | 833 |
| 834 bool VaapiVideoDecodeAccelerator::DecodeSurface( |
| 835 const scoped_refptr<VaapiDecodeSurface>& dec_surface) { |
| 836 if (!vaapi_wrapper_->ExecuteAndDestroyPendingBuffers( |
| 837 dec_surface->va_surface()->id())) { |
| 838 DVLOG(1) << "Failed decoding picture"; |
| 839 return false; |
| 840 } |
| 841 |
| 842 return true; |
| 843 } |
| 844 |
| 845 void VaapiVideoDecodeAccelerator::SurfaceReady( |
| 846 const scoped_refptr<VaapiDecodeSurface>& dec_surface) { |
| 847 if (message_loop_ != base::MessageLoop::current()) { |
| 848 message_loop_->PostTask( |
| 849 FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::SurfaceReady, |
| 850 weak_this_, dec_surface)); |
| 851 return; |
| 852 } |
| 853 |
| 854 DCHECK(!awaiting_va_surfaces_recycle_); |
| 855 |
| 856 { |
| 857 base::AutoLock auto_lock(lock_); |
| 858 // Drop any requests to output if we are resetting or being destroyed. |
| 859 if (state_ == kResetting || state_ == kDestroying) |
| 860 return; |
| 861 } |
| 862 |
| 863 pending_output_cbs_.push( |
| 864 base::Bind(&VaapiVideoDecodeAccelerator::OutputPicture, weak_this_, |
| 865 dec_surface->va_surface(), dec_surface->bitstream_id())); |
| 866 |
| 867 TryOutputSurface(); |
| 868 } |
| 869 |
| 870 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> |
| 871 VaapiVideoDecodeAccelerator::CreateSurface() { |
| 872 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); |
| 873 base::AutoLock auto_lock(lock_); |
| 874 |
| 875 if (available_va_surfaces_.empty()) |
| 876 return nullptr; |
| 877 |
| 878 DCHECK(!awaiting_va_surfaces_recycle_); |
| 879 scoped_refptr<VASurface> va_surface( |
| 880 new VASurface(available_va_surfaces_.front(), requested_pic_size_, |
| 881 va_surface_release_cb_)); |
| 882 available_va_surfaces_.pop_front(); |
| 883 |
| 884 scoped_refptr<VaapiDecodeSurface> dec_surface = |
| 885 new VaapiDecodeSurface(curr_input_buffer_->id, va_surface); |
| 886 |
| 887 return dec_surface; |
| 888 } |
| 889 |
| 890 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::VaapiH264Accelerator( |
| 891 VaapiVideoDecodeAccelerator* vaapi_dec, |
| 892 VaapiWrapper* vaapi_wrapper) |
| 893 : vaapi_wrapper_(vaapi_wrapper), vaapi_dec_(vaapi_dec) { |
| 894 DCHECK(vaapi_wrapper_); |
| 895 DCHECK(vaapi_dec_); |
| 896 } |
| 897 |
| 898 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::~VaapiH264Accelerator() { |
| 899 } |
| 900 |
| 901 scoped_refptr<H264Picture> |
| 902 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::CreateH264Picture() { |
| 903 scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface(); |
| 904 if (!va_surface) |
| 905 return nullptr; |
| 906 |
| 907 return new VaapiH264Picture(va_surface); |
| 908 } |
| 909 |
| 910 // Fill |va_pic| with default/neutral values. |
| 911 static void InitVAPicture(VAPictureH264* va_pic) { |
| 912 memset(va_pic, 0, sizeof(*va_pic)); |
| 913 va_pic->picture_id = VA_INVALID_ID; |
| 914 va_pic->flags = VA_PICTURE_H264_INVALID; |
| 915 } |
| 916 |
| 917 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitFrameMetadata( |
| 918 const media::H264SPS* sps, |
| 919 const media::H264PPS* pps, |
| 920 const H264DPB& dpb, |
| 921 const H264Picture::Vector& ref_pic_listp0, |
| 922 const H264Picture::Vector& ref_pic_listb0, |
| 923 const H264Picture::Vector& ref_pic_listb1, |
| 924 const scoped_refptr<H264Picture>& pic) { |
| 925 VAPictureParameterBufferH264 pic_param; |
| 926 memset(&pic_param, 0, sizeof(pic_param)); |
| 927 |
| 928 #define FROM_SPS_TO_PP(a) pic_param.a = sps->a; |
| 929 #define FROM_SPS_TO_PP2(a, b) pic_param.b = sps->a; |
| 930 FROM_SPS_TO_PP2(pic_width_in_mbs_minus1, picture_width_in_mbs_minus1); |
| 931 // This assumes non-interlaced video. |
| 932 FROM_SPS_TO_PP2(pic_height_in_map_units_minus1, picture_height_in_mbs_minus1); |
| 933 FROM_SPS_TO_PP(bit_depth_luma_minus8); |
| 934 FROM_SPS_TO_PP(bit_depth_chroma_minus8); |
| 935 #undef FROM_SPS_TO_PP |
| 936 #undef FROM_SPS_TO_PP2 |
| 937 |
| 938 #define FROM_SPS_TO_PP_SF(a) pic_param.seq_fields.bits.a = sps->a; |
| 939 #define FROM_SPS_TO_PP_SF2(a, b) pic_param.seq_fields.bits.b = sps->a; |
| 940 FROM_SPS_TO_PP_SF(chroma_format_idc); |
| 941 FROM_SPS_TO_PP_SF2(separate_colour_plane_flag, |
| 942 residual_colour_transform_flag); |
| 943 FROM_SPS_TO_PP_SF(gaps_in_frame_num_value_allowed_flag); |
| 944 FROM_SPS_TO_PP_SF(frame_mbs_only_flag); |
| 945 FROM_SPS_TO_PP_SF(mb_adaptive_frame_field_flag); |
| 946 FROM_SPS_TO_PP_SF(direct_8x8_inference_flag); |
| 947 pic_param.seq_fields.bits.MinLumaBiPredSize8x8 = (sps->level_idc >= 31); |
| 948 FROM_SPS_TO_PP_SF(log2_max_frame_num_minus4); |
| 949 FROM_SPS_TO_PP_SF(pic_order_cnt_type); |
| 950 FROM_SPS_TO_PP_SF(log2_max_pic_order_cnt_lsb_minus4); |
| 951 FROM_SPS_TO_PP_SF(delta_pic_order_always_zero_flag); |
| 952 #undef FROM_SPS_TO_PP_SF |
| 953 #undef FROM_SPS_TO_PP_SF2 |
| 954 |
| 955 #define FROM_PPS_TO_PP(a) pic_param.a = pps->a; |
| 956 FROM_PPS_TO_PP(num_slice_groups_minus1); |
| 957 pic_param.slice_group_map_type = 0; |
| 958 pic_param.slice_group_change_rate_minus1 = 0; |
| 959 FROM_PPS_TO_PP(pic_init_qp_minus26); |
| 960 FROM_PPS_TO_PP(pic_init_qs_minus26); |
| 961 FROM_PPS_TO_PP(chroma_qp_index_offset); |
| 962 FROM_PPS_TO_PP(second_chroma_qp_index_offset); |
| 963 #undef FROM_PPS_TO_PP |
| 964 |
| 965 #define FROM_PPS_TO_PP_PF(a) pic_param.pic_fields.bits.a = pps->a; |
| 966 #define FROM_PPS_TO_PP_PF2(a, b) pic_param.pic_fields.bits.b = pps->a; |
| 967 FROM_PPS_TO_PP_PF(entropy_coding_mode_flag); |
| 968 FROM_PPS_TO_PP_PF(weighted_pred_flag); |
| 969 FROM_PPS_TO_PP_PF(weighted_bipred_idc); |
| 970 FROM_PPS_TO_PP_PF(transform_8x8_mode_flag); |
| 971 |
| 972 pic_param.pic_fields.bits.field_pic_flag = 0; |
| 973 FROM_PPS_TO_PP_PF(constrained_intra_pred_flag); |
| 974 FROM_PPS_TO_PP_PF2(bottom_field_pic_order_in_frame_present_flag, |
| 975 pic_order_present_flag); |
| 976 FROM_PPS_TO_PP_PF(deblocking_filter_control_present_flag); |
| 977 FROM_PPS_TO_PP_PF(redundant_pic_cnt_present_flag); |
| 978 pic_param.pic_fields.bits.reference_pic_flag = pic->ref; |
| 979 #undef FROM_PPS_TO_PP_PF |
| 980 #undef FROM_PPS_TO_PP_PF2 |
| 981 |
| 982 pic_param.frame_num = pic->frame_num; |
| 983 |
| 984 InitVAPicture(&pic_param.CurrPic); |
| 985 FillVAPicture(&pic_param.CurrPic, pic); |
| 986 |
| 987 // Init reference pictures' array. |
| 988 for (int i = 0; i < 16; ++i) |
| 989 InitVAPicture(&pic_param.ReferenceFrames[i]); |
| 990 |
| 991 // And fill it with picture info from DPB. |
| 992 FillVARefFramesFromDPB(dpb, pic_param.ReferenceFrames, |
| 993 arraysize(pic_param.ReferenceFrames)); |
| 994 |
| 995 pic_param.num_ref_frames = sps->max_num_ref_frames; |
| 996 |
| 997 if (!vaapi_wrapper_->SubmitBuffer(VAPictureParameterBufferType, |
| 998 sizeof(pic_param), |
| 999 &pic_param)) |
| 1000 return false; |
| 1001 |
| 1002 VAIQMatrixBufferH264 iq_matrix_buf; |
| 1003 memset(&iq_matrix_buf, 0, sizeof(iq_matrix_buf)); |
| 1004 |
| 1005 if (pps->pic_scaling_matrix_present_flag) { |
| 1006 for (int i = 0; i < 6; ++i) { |
| 1007 for (int j = 0; j < 16; ++j) |
| 1008 iq_matrix_buf.ScalingList4x4[i][j] = pps->scaling_list4x4[i][j]; |
| 1009 } |
| 1010 |
| 1011 for (int i = 0; i < 2; ++i) { |
| 1012 for (int j = 0; j < 64; ++j) |
| 1013 iq_matrix_buf.ScalingList8x8[i][j] = pps->scaling_list8x8[i][j]; |
| 1014 } |
| 1015 } else { |
| 1016 for (int i = 0; i < 6; ++i) { |
| 1017 for (int j = 0; j < 16; ++j) |
| 1018 iq_matrix_buf.ScalingList4x4[i][j] = sps->scaling_list4x4[i][j]; |
| 1019 } |
| 1020 |
| 1021 for (int i = 0; i < 2; ++i) { |
| 1022 for (int j = 0; j < 64; ++j) |
| 1023 iq_matrix_buf.ScalingList8x8[i][j] = sps->scaling_list8x8[i][j]; |
| 1024 } |
| 1025 } |
| 1026 |
| 1027 return vaapi_wrapper_->SubmitBuffer(VAIQMatrixBufferType, |
| 1028 sizeof(iq_matrix_buf), |
| 1029 &iq_matrix_buf); |
| 1030 } |
| 1031 |
| 1032 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitSlice( |
| 1033 const media::H264PPS* pps, |
| 1034 const media::H264SliceHeader* slice_hdr, |
| 1035 const H264Picture::Vector& ref_pic_list0, |
| 1036 const H264Picture::Vector& ref_pic_list1, |
| 1037 const scoped_refptr<H264Picture>& pic, |
| 1038 const uint8_t* data, |
| 1039 size_t size) { |
| 1040 VASliceParameterBufferH264 slice_param; |
| 1041 memset(&slice_param, 0, sizeof(slice_param)); |
| 1042 |
| 1043 slice_param.slice_data_size = slice_hdr->nalu_size; |
| 1044 slice_param.slice_data_offset = 0; |
| 1045 slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL; |
| 1046 slice_param.slice_data_bit_offset = slice_hdr->header_bit_size; |
| 1047 |
| 1048 #define SHDRToSP(a) slice_param.a = slice_hdr->a; |
| 1049 SHDRToSP(first_mb_in_slice); |
| 1050 slice_param.slice_type = slice_hdr->slice_type % 5; |
| 1051 SHDRToSP(direct_spatial_mv_pred_flag); |
| 1052 |
| 1053 // TODO(posciak): make sure the parser sets these even when the override |
| 1054 // flags in the slice header are off. |
| 1055 SHDRToSP(num_ref_idx_l0_active_minus1); |
| 1056 SHDRToSP(num_ref_idx_l1_active_minus1); |
| 1057 SHDRToSP(cabac_init_idc); |
| 1058 SHDRToSP(slice_qp_delta); |
| 1059 SHDRToSP(disable_deblocking_filter_idc); |
| 1060 SHDRToSP(slice_alpha_c0_offset_div2); |
| 1061 SHDRToSP(slice_beta_offset_div2); |
| 1062 |
| 1063 if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) && |
| 1064 pps->weighted_pred_flag) || |
| 1065 (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) { |
| 1066 SHDRToSP(luma_log2_weight_denom); |
| 1067 SHDRToSP(chroma_log2_weight_denom); |
| 1068 |
| 1069 SHDRToSP(luma_weight_l0_flag); |
| 1070 SHDRToSP(luma_weight_l1_flag); |
| 1071 |
| 1072 SHDRToSP(chroma_weight_l0_flag); |
| 1073 SHDRToSP(chroma_weight_l1_flag); |
| 1074 |
| 1075 for (int i = 0; i <= slice_param.num_ref_idx_l0_active_minus1; ++i) { |
| 1076 slice_param.luma_weight_l0[i] = |
| 1077 slice_hdr->pred_weight_table_l0.luma_weight[i]; |
| 1078 slice_param.luma_offset_l0[i] = |
| 1079 slice_hdr->pred_weight_table_l0.luma_offset[i]; |
| 1080 |
| 1081 for (int j = 0; j < 2; ++j) { |
| 1082 slice_param.chroma_weight_l0[i][j] = |
| 1083 slice_hdr->pred_weight_table_l0.chroma_weight[i][j]; |
| 1084 slice_param.chroma_offset_l0[i][j] = |
| 1085 slice_hdr->pred_weight_table_l0.chroma_offset[i][j]; |
| 1086 } |
| 1087 } |
| 1088 |
| 1089 if (slice_hdr->IsBSlice()) { |
| 1090 for (int i = 0; i <= slice_param.num_ref_idx_l1_active_minus1; ++i) { |
| 1091 slice_param.luma_weight_l1[i] = |
| 1092 slice_hdr->pred_weight_table_l1.luma_weight[i]; |
| 1093 slice_param.luma_offset_l1[i] = |
| 1094 slice_hdr->pred_weight_table_l1.luma_offset[i]; |
| 1095 |
| 1096 for (int j = 0; j < 2; ++j) { |
| 1097 slice_param.chroma_weight_l1[i][j] = |
| 1098 slice_hdr->pred_weight_table_l1.chroma_weight[i][j]; |
| 1099 slice_param.chroma_offset_l1[i][j] = |
| 1100 slice_hdr->pred_weight_table_l1.chroma_offset[i][j]; |
| 1101 } |
| 1102 } |
| 1103 } |
| 1104 } |
| 1105 |
| 1106 static_assert( |
| 1107 arraysize(slice_param.RefPicList0) == arraysize(slice_param.RefPicList1), |
| 1108 "Invalid RefPicList sizes"); |
| 1109 |
| 1110 for (size_t i = 0; i < arraysize(slice_param.RefPicList0); ++i) { |
| 1111 InitVAPicture(&slice_param.RefPicList0[i]); |
| 1112 InitVAPicture(&slice_param.RefPicList1[i]); |
| 1113 } |
| 1114 |
| 1115 for (size_t i = 0; |
| 1116 i < ref_pic_list0.size() && i < arraysize(slice_param.RefPicList0); |
| 1117 ++i) { |
| 1118 if (ref_pic_list0[i]) |
| 1119 FillVAPicture(&slice_param.RefPicList0[i], ref_pic_list0[i]); |
| 1120 } |
| 1121 for (size_t i = 0; |
| 1122 i < ref_pic_list1.size() && i < arraysize(slice_param.RefPicList1); |
| 1123 ++i) { |
| 1124 if (ref_pic_list1[i]) |
| 1125 FillVAPicture(&slice_param.RefPicList1[i], ref_pic_list1[i]); |
| 1126 } |
| 1127 |
| 1128 if (!vaapi_wrapper_->SubmitBuffer(VASliceParameterBufferType, |
| 1129 sizeof(slice_param), |
| 1130 &slice_param)) |
| 1131 return false; |
| 1132 |
| 1133 // Can't help it, blame libva... |
| 1134 void* non_const_ptr = const_cast<uint8*>(data); |
| 1135 return vaapi_wrapper_->SubmitBuffer(VASliceDataBufferType, size, |
| 1136 non_const_ptr); |
| 1137 } |
| 1138 |
| 1139 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitDecode( |
| 1140 const scoped_refptr<H264Picture>& pic) { |
| 1141 DVLOG(4) << "Decoding POC " << pic->pic_order_cnt; |
| 1142 scoped_refptr<VaapiDecodeSurface> dec_surface = |
| 1143 H264PictureToVaapiDecodeSurface(pic); |
| 1144 |
| 1145 return vaapi_dec_->DecodeSurface(dec_surface); |
| 1146 } |
| 1147 |
| 1148 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::OutputPicture( |
| 1149 const scoped_refptr<H264Picture>& pic) { |
| 1150 scoped_refptr<VaapiDecodeSurface> dec_surface = |
| 1151 H264PictureToVaapiDecodeSurface(pic); |
| 1152 |
| 1153 vaapi_dec_->SurfaceReady(dec_surface); |
| 1154 |
| 1155 return true; |
| 1156 } |
| 1157 |
| 1158 void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::Reset() { |
| 1159 vaapi_wrapper_->DestroyPendingBuffers(); |
| 1160 } |
| 1161 |
| 1162 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> |
| 1163 VaapiVideoDecodeAccelerator::VaapiH264Accelerator:: |
| 1164 H264PictureToVaapiDecodeSurface(const scoped_refptr<H264Picture>& pic) { |
| 1165 VaapiH264Picture* vaapi_pic = pic->AsVaapiH264Picture(); |
| 1166 CHECK(vaapi_pic); |
| 1167 return vaapi_pic->dec_surface(); |
| 1168 } |
| 1169 |
| 1170 void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::FillVAPicture( |
| 1171 VAPictureH264* va_pic, |
| 1172 scoped_refptr<H264Picture> pic) { |
| 1173 scoped_refptr<VaapiDecodeSurface> dec_surface = |
| 1174 H264PictureToVaapiDecodeSurface(pic); |
| 1175 |
| 1176 va_pic->picture_id = dec_surface->va_surface()->id(); |
| 1177 va_pic->frame_idx = pic->frame_num; |
| 1178 va_pic->flags = 0; |
| 1179 |
| 1180 switch (pic->field) { |
| 1181 case H264Picture::FIELD_NONE: |
| 1182 break; |
| 1183 case H264Picture::FIELD_TOP: |
| 1184 va_pic->flags |= VA_PICTURE_H264_TOP_FIELD; |
| 1185 break; |
| 1186 case H264Picture::FIELD_BOTTOM: |
| 1187 va_pic->flags |= VA_PICTURE_H264_BOTTOM_FIELD; |
| 1188 break; |
| 1189 } |
| 1190 |
| 1191 if (pic->ref) { |
| 1192 va_pic->flags |= pic->long_term ? VA_PICTURE_H264_LONG_TERM_REFERENCE |
| 1193 : VA_PICTURE_H264_SHORT_TERM_REFERENCE; |
| 1194 } |
| 1195 |
| 1196 va_pic->TopFieldOrderCnt = pic->top_field_order_cnt; |
| 1197 va_pic->BottomFieldOrderCnt = pic->bottom_field_order_cnt; |
| 1198 } |
| 1199 |
| 1200 int VaapiVideoDecodeAccelerator::VaapiH264Accelerator::FillVARefFramesFromDPB( |
| 1201 const H264DPB& dpb, |
| 1202 VAPictureH264* va_pics, |
| 1203 int num_pics) { |
| 1204 H264Picture::Vector::const_reverse_iterator rit; |
| 1205 int i; |
| 1206 |
| 1207 // Return reference frames in reverse order of insertion. |
| 1208 // Libva does not document this, but other implementations (e.g. mplayer) |
| 1209 // do it this way as well. |
| 1210 for (rit = dpb.rbegin(), i = 0; rit != dpb.rend() && i < num_pics; ++rit) { |
| 1211 if ((*rit)->ref) |
| 1212 FillVAPicture(&va_pics[i++], *rit); |
| 1213 } |
| 1214 |
| 1215 return i; |
| 1216 } |
| 1217 |
751 // static | 1218 // static |
752 media::VideoDecodeAccelerator::SupportedProfiles | 1219 media::VideoDecodeAccelerator::SupportedProfiles |
753 VaapiVideoDecodeAccelerator::GetSupportedProfiles() { | 1220 VaapiVideoDecodeAccelerator::GetSupportedProfiles() { |
754 return VaapiWrapper::GetSupportedDecodeProfiles(); | 1221 return VaapiWrapper::GetSupportedDecodeProfiles(); |
755 } | 1222 } |
756 | 1223 |
757 } // namespace content | 1224 } // namespace content |