Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(61)

Side by Side Diff: gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc

Issue 16175005: GPU: Replace AsyncPixelTransferState with AsyncPixelTransferDelegate. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Rebase Created 7 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "gpu/command_buffer/service/async_pixel_transfer_manager_egl.h" 5 #include "gpu/command_buffer/service/async_pixel_transfer_manager_egl.h"
6 6
7 #include <list> 7 #include <list>
8 #include <string> 8 #include <string>
9 9
10 #include "base/bind.h" 10 #include "base/bind.h"
(...skipping 371 matching lines...) Expand 10 before | Expand all | Expand 10 after
382 // and the client can safely use the texture. This occurs 382 // and the client can safely use the texture. This occurs
383 // during BindCompletedAsyncTransfers(). 383 // during BindCompletedAsyncTransfers().
384 base::Closure bind_callback_; 384 base::Closure bind_callback_;
385 385
386 // Customize when we block on fences (these are work-arounds). 386 // Customize when we block on fences (these are work-arounds).
387 bool wait_for_uploads_; 387 bool wait_for_uploads_;
388 bool wait_for_creation_; 388 bool wait_for_creation_;
389 bool use_image_preserved_; 389 bool use_image_preserved_;
390 }; 390 };
391 391
392 // EGL needs thread-safe ref-counting, so this just wraps
393 // an internal thread-safe ref-counted state object.
394 class AsyncTransferStateImpl : public AsyncPixelTransferState {
395 public:
396 AsyncTransferStateImpl(GLuint texture_id,
397 const AsyncTexImage2DParams& define_params,
398 bool wait_for_uploads,
399 bool wait_for_creation,
400 bool use_image_preserved)
401 : internal_(new TransferStateInternal(texture_id,
402 define_params,
403 wait_for_uploads,
404 wait_for_creation,
405 use_image_preserved)) {
406 }
407
408 virtual bool TransferIsInProgress() OVERRIDE {
409 return internal_->TransferIsInProgress();
410 }
411
412 void BindTransfer() {
413 internal_->BindTransfer();
414 }
415
416 scoped_refptr<TransferStateInternal> internal_;
417
418 private:
419 virtual ~AsyncTransferStateImpl() {}
420 };
421
422 } // namespace 392 } // namespace
423 393
424 // Class which handles async pixel transfers using EGLImageKHR and another 394 // Class which handles async pixel transfers using EGLImageKHR and another
425 // upload thread 395 // upload thread
426 class AsyncPixelTransferDelegateEGL 396 class AsyncPixelTransferDelegateEGL
427 : public AsyncPixelTransferDelegate, 397 : public AsyncPixelTransferDelegate,
428 public base::SupportsWeakPtr<AsyncPixelTransferDelegateEGL> { 398 public base::SupportsWeakPtr<AsyncPixelTransferDelegateEGL> {
429 public: 399 public:
430 explicit AsyncPixelTransferDelegateEGL( 400 AsyncPixelTransferDelegateEGL(
431 AsyncPixelTransferManagerEGL::SharedState* shared_state); 401 AsyncPixelTransferManagerEGL::SharedState* shared_state,
402 GLuint texture_id,
403 const AsyncTexImage2DParams& define_params);
432 virtual ~AsyncPixelTransferDelegateEGL(); 404 virtual ~AsyncPixelTransferDelegateEGL();
433 405
406 void BindTransfer() { state_->BindTransfer(); }
407
434 // Implement AsyncPixelTransferDelegate: 408 // Implement AsyncPixelTransferDelegate:
435 virtual AsyncPixelTransferState* CreatePixelTransferState(
436 GLuint texture_id,
437 const AsyncTexImage2DParams& define_params) OVERRIDE;
438 virtual void AsyncTexImage2D( 409 virtual void AsyncTexImage2D(
439 AsyncPixelTransferState* state,
440 const AsyncTexImage2DParams& tex_params, 410 const AsyncTexImage2DParams& tex_params,
441 const AsyncMemoryParams& mem_params, 411 const AsyncMemoryParams& mem_params,
442 const base::Closure& bind_callback) OVERRIDE; 412 const base::Closure& bind_callback) OVERRIDE;
443 virtual void AsyncTexSubImage2D( 413 virtual void AsyncTexSubImage2D(
444 AsyncPixelTransferState* state,
445 const AsyncTexSubImage2DParams& tex_params, 414 const AsyncTexSubImage2DParams& tex_params,
446 const AsyncMemoryParams& mem_params) OVERRIDE; 415 const AsyncMemoryParams& mem_params) OVERRIDE;
447 virtual void WaitForTransferCompletion( 416 virtual bool TransferIsInProgress() OVERRIDE;
448 AsyncPixelTransferState* state) OVERRIDE; 417 virtual void WaitForTransferCompletion() OVERRIDE;
449 418
450 private: 419 private:
451 // Returns true if a work-around was used. 420 // Returns true if a work-around was used.
452 bool WorkAroundAsyncTexImage2D( 421 bool WorkAroundAsyncTexImage2D(
453 AsyncPixelTransferState* state,
454 const AsyncTexImage2DParams& tex_params, 422 const AsyncTexImage2DParams& tex_params,
455 const AsyncMemoryParams& mem_params, 423 const AsyncMemoryParams& mem_params,
456 const base::Closure& bind_callback); 424 const base::Closure& bind_callback);
457 bool WorkAroundAsyncTexSubImage2D( 425 bool WorkAroundAsyncTexSubImage2D(
458 AsyncPixelTransferState* state,
459 const AsyncTexSubImage2DParams& tex_params, 426 const AsyncTexSubImage2DParams& tex_params,
460 const AsyncMemoryParams& mem_params); 427 const AsyncMemoryParams& mem_params);
461 428
462 // A raw pointer is safe because the SharedState is owned by the Manager, 429 // A raw pointer is safe because the SharedState is owned by the Manager,
463 // which owns this Delegate. 430 // which owns this Delegate.
464 AsyncPixelTransferManagerEGL::SharedState* shared_state_; 431 AsyncPixelTransferManagerEGL::SharedState* shared_state_;
432 scoped_refptr<TransferStateInternal> state_;
465 433
466 DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateEGL); 434 DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateEGL);
467 }; 435 };
468 436
469 AsyncPixelTransferDelegateEGL::AsyncPixelTransferDelegateEGL( 437 AsyncPixelTransferDelegateEGL::AsyncPixelTransferDelegateEGL(
470 AsyncPixelTransferManagerEGL::SharedState* shared_state) 438 AsyncPixelTransferManagerEGL::SharedState* shared_state,
439 GLuint texture_id,
440 const AsyncTexImage2DParams& define_params)
471 : shared_state_(shared_state) { 441 : shared_state_(shared_state) {
472 }
473
474 AsyncPixelTransferDelegateEGL::~AsyncPixelTransferDelegateEGL() {}
475
476 AsyncPixelTransferState* AsyncPixelTransferDelegateEGL::
477 CreatePixelTransferState(GLuint texture_id,
478 const AsyncTexImage2DParams& define_params) {
479 // We can't wait on uploads on imagination (it can take 200ms+). 442 // We can't wait on uploads on imagination (it can take 200ms+).
480 // In practice, they are complete when the CPU glTexSubImage2D completes. 443 // In practice, they are complete when the CPU glTexSubImage2D completes.
481 bool wait_for_uploads = !shared_state_->is_imagination; 444 bool wait_for_uploads = !shared_state_->is_imagination;
482 445
483 // Qualcomm runs into texture corruption problems if the same texture is 446 // Qualcomm runs into texture corruption problems if the same texture is
484 // uploaded to with both async and normal uploads. Synchronize after EGLImage 447 // uploaded to with both async and normal uploads. Synchronize after EGLImage
485 // creation on the main thread as a work-around. 448 // creation on the main thread as a work-around.
486 bool wait_for_creation = shared_state_->is_qualcomm; 449 bool wait_for_creation = shared_state_->is_qualcomm;
487 450
488 // Qualcomm has a race when using image_preserved=FALSE, 451 // Qualcomm has a race when using image_preserved=FALSE,
489 // which can result in black textures even after the first upload. 452 // which can result in black textures even after the first upload.
490 // Since using FALSE is mainly for performance (to avoid layout changes), 453 // Since using FALSE is mainly for performance (to avoid layout changes),
491 // but Qualcomm itself doesn't seem to get any performance benefit, 454 // but Qualcomm itself doesn't seem to get any performance benefit,
492 // we just use image_preserved=TRUE on Qualcomm as a work-around. 455 // we just use image_preserved=TRUE on Qualcomm as a work-around.
493 bool use_image_preserved = 456 bool use_image_preserved =
494 shared_state_->is_qualcomm || shared_state_->is_imagination; 457 shared_state_->is_qualcomm || shared_state_->is_imagination;
495 458
496 return new AsyncTransferStateImpl(texture_id, 459 state_ = new TransferStateInternal(texture_id,
497 define_params, 460 define_params,
498 wait_for_uploads, 461 wait_for_uploads,
499 wait_for_creation, 462 wait_for_creation,
500 use_image_preserved); 463 use_image_preserved);
501 } 464 }
502 465
503 void AsyncPixelTransferDelegateEGL::WaitForTransferCompletion( 466 AsyncPixelTransferDelegateEGL::~AsyncPixelTransferDelegateEGL() {}
504 AsyncPixelTransferState* transfer_state) {
505 scoped_refptr<TransferStateInternal> state =
506 static_cast<AsyncTransferStateImpl*>(transfer_state)->internal_.get();
507 DCHECK(state.get());
508 DCHECK(state->texture_id_);
509 467
510 if (state->TransferIsInProgress()) { 468 bool AsyncPixelTransferDelegateEGL::TransferIsInProgress() {
469 return state_->TransferIsInProgress();
470 }
471
472 void AsyncPixelTransferDelegateEGL::WaitForTransferCompletion() {
473 if (state_->TransferIsInProgress()) {
511 #if defined(OS_ANDROID) || defined(OS_LINUX) 474 #if defined(OS_ANDROID) || defined(OS_LINUX)
512 g_transfer_thread.Pointer()->SetPriority(base::kThreadPriority_Display); 475 g_transfer_thread.Pointer()->SetPriority(base::kThreadPriority_Display);
513 #endif 476 #endif
514 477
515 state->WaitForTransferCompletion(); 478 state_->WaitForTransferCompletion();
516 DCHECK(!state->TransferIsInProgress()); 479 DCHECK(!state_->TransferIsInProgress());
517 480
518 #if defined(OS_ANDROID) || defined(OS_LINUX) 481 #if defined(OS_ANDROID) || defined(OS_LINUX)
519 g_transfer_thread.Pointer()->SetPriority(base::kThreadPriority_Background); 482 g_transfer_thread.Pointer()->SetPriority(base::kThreadPriority_Background);
520 #endif 483 #endif
521 } 484 }
522 } 485 }
523 486
524 void AsyncPixelTransferDelegateEGL::AsyncTexImage2D( 487 void AsyncPixelTransferDelegateEGL::AsyncTexImage2D(
525 AsyncPixelTransferState* transfer_state,
526 const AsyncTexImage2DParams& tex_params, 488 const AsyncTexImage2DParams& tex_params,
527 const AsyncMemoryParams& mem_params, 489 const AsyncMemoryParams& mem_params,
528 const base::Closure& bind_callback) { 490 const base::Closure& bind_callback) {
529 if (WorkAroundAsyncTexImage2D(transfer_state, tex_params, 491 if (WorkAroundAsyncTexImage2D(tex_params, mem_params, bind_callback))
530 mem_params, bind_callback))
531 return; 492 return;
532 493
533 scoped_refptr<TransferStateInternal> state =
534 static_cast<AsyncTransferStateImpl*>(transfer_state)->internal_.get();
535 DCHECK(mem_params.shared_memory); 494 DCHECK(mem_params.shared_memory);
536 DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size, 495 DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
537 mem_params.shm_size); 496 mem_params.shm_size);
538 DCHECK(state.get()); 497 DCHECK(!state_->TransferIsInProgress());
539 DCHECK(state->texture_id_); 498 DCHECK_EQ(state_->egl_image_, EGL_NO_IMAGE_KHR);
540 DCHECK(!state->TransferIsInProgress());
541 DCHECK_EQ(state->egl_image_, EGL_NO_IMAGE_KHR);
542 DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target); 499 DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
543 DCHECK_EQ(tex_params.level, 0); 500 DCHECK_EQ(tex_params.level, 0);
544 501
545 // Mark the transfer in progress and save the late bind 502 // Mark the transfer in progress and save the late bind
546 // callback, so we can notify the client when it is bound. 503 // callback, so we can notify the client when it is bound.
547 shared_state_->pending_allocations.push_back(transfer_state->AsWeakPtr()); 504 shared_state_->pending_allocations.push_back(AsWeakPtr());
548 state->bind_callback_ = bind_callback; 505 state_->bind_callback_ = bind_callback;
549 506
550 // Mark the transfer in progress. 507 // Mark the transfer in progress.
551 state->MarkAsTransferIsInProgress(); 508 state_->MarkAsTransferIsInProgress();
552 509
553 // Duplicate the shared memory so there is no way we can get 510 // Duplicate the shared memory so there is no way we can get
554 // a use-after-free of the raw pixels. 511 // a use-after-free of the raw pixels.
555 transfer_message_loop_proxy()->PostTask(FROM_HERE, 512 transfer_message_loop_proxy()->PostTask(FROM_HERE,
556 base::Bind( 513 base::Bind(
557 &TransferStateInternal::PerformAsyncTexImage2D, 514 &TransferStateInternal::PerformAsyncTexImage2D,
558 state, 515 state_,
559 tex_params, 516 tex_params,
560 mem_params, 517 mem_params,
561 base::Owned(new ScopedSafeSharedMemory(safe_shared_memory_pool(), 518 base::Owned(new ScopedSafeSharedMemory(safe_shared_memory_pool(),
562 mem_params.shared_memory, 519 mem_params.shared_memory,
563 mem_params.shm_size)))); 520 mem_params.shm_size))));
564 521
565 522
566 DCHECK(CHECK_GL()); 523 DCHECK(CHECK_GL());
567 } 524 }
568 525
569 void AsyncPixelTransferDelegateEGL::AsyncTexSubImage2D( 526 void AsyncPixelTransferDelegateEGL::AsyncTexSubImage2D(
570 AsyncPixelTransferState* transfer_state,
571 const AsyncTexSubImage2DParams& tex_params, 527 const AsyncTexSubImage2DParams& tex_params,
572 const AsyncMemoryParams& mem_params) { 528 const AsyncMemoryParams& mem_params) {
573 TRACE_EVENT2("gpu", "AsyncTexSubImage2D", 529 TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
574 "width", tex_params.width, 530 "width", tex_params.width,
575 "height", tex_params.height); 531 "height", tex_params.height);
576 if (WorkAroundAsyncTexSubImage2D(transfer_state, tex_params, mem_params)) 532 if (WorkAroundAsyncTexSubImage2D(tex_params, mem_params))
577 return; 533 return;
578 scoped_refptr<TransferStateInternal> state = 534 DCHECK(!state_->TransferIsInProgress());
579 static_cast<AsyncTransferStateImpl*>(transfer_state)->internal_.get();
580
581 DCHECK(state->texture_id_);
582 DCHECK(!state->TransferIsInProgress());
583 DCHECK(mem_params.shared_memory); 535 DCHECK(mem_params.shared_memory);
584 DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size, 536 DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
585 mem_params.shm_size); 537 mem_params.shm_size);
586 DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target); 538 DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
587 DCHECK_EQ(tex_params.level, 0); 539 DCHECK_EQ(tex_params.level, 0);
588 540
589 // Mark the transfer in progress. 541 // Mark the transfer in progress.
590 state->MarkAsTransferIsInProgress(); 542 state_->MarkAsTransferIsInProgress();
591 543
592 // If this wasn't async allocated, we don't have an EGLImage yet. 544 // If this wasn't async allocated, we don't have an EGLImage yet.
593 // Create the EGLImage if it hasn't already been created. 545 // Create the EGLImage if it hasn't already been created.
594 state->CreateEglImageOnMainThreadIfNeeded(); 546 state_->CreateEglImageOnMainThreadIfNeeded();
595 547
596 // Duplicate the shared memory so there is no way we can get 548 // Duplicate the shared memory so there is no way we can get
597 // a use-after-free of the raw pixels. 549 // a use-after-free of the raw pixels.
598 transfer_message_loop_proxy()->PostTask(FROM_HERE, 550 transfer_message_loop_proxy()->PostTask(FROM_HERE,
599 base::Bind( 551 base::Bind(
600 &TransferStateInternal::PerformAsyncTexSubImage2D, 552 &TransferStateInternal::PerformAsyncTexSubImage2D,
601 state, 553 state_,
602 tex_params, 554 tex_params,
603 mem_params, 555 mem_params,
604 base::Owned(new ScopedSafeSharedMemory(safe_shared_memory_pool(), 556 base::Owned(new ScopedSafeSharedMemory(safe_shared_memory_pool(),
605 mem_params.shared_memory, 557 mem_params.shared_memory,
606 mem_params.shm_size)), 558 mem_params.shm_size)),
607 shared_state_->texture_upload_stats)); 559 shared_state_->texture_upload_stats));
608 560
609 DCHECK(CHECK_GL()); 561 DCHECK(CHECK_GL());
610 } 562 }
611 563
612 namespace { 564 namespace {
613 bool IsPowerOfTwo (unsigned int x) { 565 bool IsPowerOfTwo (unsigned int x) {
614 return ((x != 0) && !(x & (x - 1))); 566 return ((x != 0) && !(x & (x - 1)));
615 } 567 }
616 568
617 bool IsMultipleOfEight(unsigned int x) { 569 bool IsMultipleOfEight(unsigned int x) {
618 return (x & 7) == 0; 570 return (x & 7) == 0;
619 } 571 }
620 572
621 bool DimensionsSupportImgFastPath(int width, int height) { 573 bool DimensionsSupportImgFastPath(int width, int height) {
622 // Multiple of eight, but not a power of two. 574 // Multiple of eight, but not a power of two.
623 return IsMultipleOfEight(width) && 575 return IsMultipleOfEight(width) &&
624 IsMultipleOfEight(height) && 576 IsMultipleOfEight(height) &&
625 !(IsPowerOfTwo(width) && 577 !(IsPowerOfTwo(width) &&
626 IsPowerOfTwo(height)); 578 IsPowerOfTwo(height));
627 } 579 }
628 } // namespace 580 } // namespace
629 581
630 // It is very difficult to stream uploads on Imagination GPUs: 582 // It is very difficult to stream uploads on Imagination GPUs:
631 // - glTexImage2D defers a swizzle/stall until draw-time 583 // - glTexImage2D defers a swizzle/stall until draw-time
632 // - glTexSubImage2D will sleep for 16ms on a good day, and 100ms 584 // - glTexSubImage2D will sleep for 16ms on a good day, and 100ms
633 // or longer if OpenGL is in heavy use by another thread. 585 // or longer if OpenGL is in heavy use by another thread.
634 // The one combination that avoids these problems requires: 586 // The one combination that avoids these problems requires:
635 // a.) Allocations/Uploads must occur on different threads/contexts. 587 // a.) Allocations/Uploads must occur on different threads/contexts.
636 // b.) Texture size must be non-power-of-two. 588 // b.) Texture size must be non-power-of-two.
637 // When using a+b, uploads will be incorrect/corrupt unless: 589 // When using a+b, uploads will be incorrect/corrupt unless:
638 // c.) Texture size must be a multiple-of-eight. 590 // c.) Texture size must be a multiple-of-eight.
639 // 591 //
640 // To achieve a.) we allocate synchronously on the main thread followed 592 // To achieve a.) we allocate synchronously on the main thread followed
641 // by uploading on the upload thread. When b/c are not true we fall back 593 // by uploading on the upload thread. When b/c are not true we fall back
642 // on purely synchronous allocation/upload on the main thread. 594 // on purely synchronous allocation/upload on the main thread.
643 595
644 bool AsyncPixelTransferDelegateEGL::WorkAroundAsyncTexImage2D( 596 bool AsyncPixelTransferDelegateEGL::WorkAroundAsyncTexImage2D(
645 AsyncPixelTransferState* transfer_state,
646 const AsyncTexImage2DParams& tex_params, 597 const AsyncTexImage2DParams& tex_params,
647 const AsyncMemoryParams& mem_params, 598 const AsyncMemoryParams& mem_params,
648 const base::Closure& bind_callback) { 599 const base::Closure& bind_callback) {
649 if (!shared_state_->is_imagination) 600 if (!shared_state_->is_imagination)
650 return false; 601 return false;
651 scoped_refptr<TransferStateInternal> state =
652 static_cast<AsyncTransferStateImpl*>(transfer_state)->internal_.get();
653 602
654 // On imagination we allocate synchronously all the time, even 603 // On imagination we allocate synchronously all the time, even
655 // if the dimensions support fast uploads. This is for part a.) 604 // if the dimensions support fast uploads. This is for part a.)
656 // above, so allocations occur on a different thread/context as uploads. 605 // above, so allocations occur on a different thread/context as uploads.
657 void* data = GetAddress(mem_params); 606 void* data = GetAddress(mem_params);
658 SetGlParametersForEglImageTexture(); 607 SetGlParametersForEglImageTexture();
659 608
660 { 609 {
661 TRACE_EVENT0("gpu", "glTexImage2D with data"); 610 TRACE_EVENT0("gpu", "glTexImage2D with data");
662 DoTexImage2D(tex_params, data); 611 DoTexImage2D(tex_params, data);
663 } 612 }
664 613
665 // The allocation has already occurred, so mark it as finished 614 // The allocation has already occurred, so mark it as finished
666 // and ready for binding. 615 // and ready for binding.
667 CHECK(!state->TransferIsInProgress()); 616 CHECK(!state_->TransferIsInProgress());
668 617
669 // If the dimensions support fast async uploads, create the 618 // If the dimensions support fast async uploads, create the
670 // EGLImage for future uploads. The late bind should not 619 // EGLImage for future uploads. The late bind should not
671 // be needed since the EGLImage was created from the main thread 620 // be needed since the EGLImage was created from the main thread
672 // texture, but this is required to prevent an imagination driver crash. 621 // texture, but this is required to prevent an imagination driver crash.
673 if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height)) { 622 if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height)) {
674 state->CreateEglImageOnMainThreadIfNeeded(); 623 state_->CreateEglImageOnMainThreadIfNeeded();
675 shared_state_->pending_allocations.push_back(transfer_state->AsWeakPtr()); 624 shared_state_->pending_allocations.push_back(AsWeakPtr());
676 state->bind_callback_ = bind_callback; 625 state_->bind_callback_ = bind_callback;
677 } 626 }
678 627
679 DCHECK(CHECK_GL()); 628 DCHECK(CHECK_GL());
680 return true; 629 return true;
681 } 630 }
682 631
683 bool AsyncPixelTransferDelegateEGL::WorkAroundAsyncTexSubImage2D( 632 bool AsyncPixelTransferDelegateEGL::WorkAroundAsyncTexSubImage2D(
684 AsyncPixelTransferState* transfer_state,
685 const AsyncTexSubImage2DParams& tex_params, 633 const AsyncTexSubImage2DParams& tex_params,
686 const AsyncMemoryParams& mem_params) { 634 const AsyncMemoryParams& mem_params) {
687 if (!shared_state_->is_imagination) 635 if (!shared_state_->is_imagination)
688 return false; 636 return false;
689 637
690 // If the dimensions support fast async uploads, we can use the 638 // If the dimensions support fast async uploads, we can use the
691 // normal async upload path for uploads. 639 // normal async upload path for uploads.
692 if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height)) 640 if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height))
693 return false; 641 return false;
694 642
695 scoped_refptr<TransferStateInternal> state =
696 static_cast<AsyncTransferStateImpl*>(transfer_state)->internal_.get();
697
698 // Fall back on a synchronous stub as we don't have a known fast path. 643 // Fall back on a synchronous stub as we don't have a known fast path.
699 // Also, older ICS drivers crash when we do any glTexSubImage2D on the 644 // Also, older ICS drivers crash when we do any glTexSubImage2D on the
700 // same thread. To work around this we do glTexImage2D instead. Since 645 // same thread. To work around this we do glTexImage2D instead. Since
701 // we didn't create an EGLImage for this texture (see above), this is 646 // we didn't create an EGLImage for this texture (see above), this is
702 // okay, but it limits this API to full updates for now. 647 // okay, but it limits this API to full updates for now.
703 DCHECK(!state->egl_image_); 648 DCHECK(!state_->egl_image_);
704 DCHECK_EQ(tex_params.xoffset, 0); 649 DCHECK_EQ(tex_params.xoffset, 0);
705 DCHECK_EQ(tex_params.yoffset, 0); 650 DCHECK_EQ(tex_params.yoffset, 0);
706 DCHECK_EQ(state->define_params_.width, tex_params.width); 651 DCHECK_EQ(state_->define_params_.width, tex_params.width);
707 DCHECK_EQ(state->define_params_.height, tex_params.height); 652 DCHECK_EQ(state_->define_params_.height, tex_params.height);
708 DCHECK_EQ(state->define_params_.level, tex_params.level); 653 DCHECK_EQ(state_->define_params_.level, tex_params.level);
709 DCHECK_EQ(state->define_params_.format, tex_params.format); 654 DCHECK_EQ(state_->define_params_.format, tex_params.format);
710 DCHECK_EQ(state->define_params_.type, tex_params.type); 655 DCHECK_EQ(state_->define_params_.type, tex_params.type);
711 656
712 void* data = GetAddress(mem_params); 657 void* data = GetAddress(mem_params);
713 base::TimeTicks begin_time; 658 base::TimeTicks begin_time;
714 if (shared_state_->texture_upload_stats.get()) 659 if (shared_state_->texture_upload_stats.get())
715 begin_time = base::TimeTicks::HighResNow(); 660 begin_time = base::TimeTicks::HighResNow();
716 { 661 {
717 TRACE_EVENT0("gpu", "glTexSubImage2D"); 662 TRACE_EVENT0("gpu", "glTexSubImage2D");
718 // Note we use define_params_ instead of tex_params. 663 // Note we use define_params_ instead of tex_params.
719 // The DCHECKs above verify this is always the same. 664 // The DCHECKs above verify this is always the same.
720 DoTexImage2D(state->define_params_, data); 665 DoTexImage2D(state_->define_params_, data);
721 } 666 }
722 if (shared_state_->texture_upload_stats.get()) { 667 if (shared_state_->texture_upload_stats.get()) {
723 shared_state_->texture_upload_stats 668 shared_state_->texture_upload_stats
724 ->AddUpload(base::TimeTicks::HighResNow() - begin_time); 669 ->AddUpload(base::TimeTicks::HighResNow() - begin_time);
725 } 670 }
726 671
727 DCHECK(CHECK_GL()); 672 DCHECK(CHECK_GL());
728 return true; 673 return true;
729 } 674 }
730 675
731 AsyncPixelTransferManagerEGL::SharedState::SharedState() 676 AsyncPixelTransferManagerEGL::SharedState::SharedState()
732 // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present. 677 // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
733 : texture_upload_stats(new AsyncPixelTransferUploadStats) { 678 : texture_upload_stats(new AsyncPixelTransferUploadStats) {
734 std::string vendor; 679 std::string vendor;
735 vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR)); 680 vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR));
736 is_imagination = vendor.find("Imagination") != std::string::npos; 681 is_imagination = vendor.find("Imagination") != std::string::npos;
737 is_qualcomm = vendor.find("Qualcomm") != std::string::npos; 682 is_qualcomm = vendor.find("Qualcomm") != std::string::npos;
738 } 683 }
739 684
740 AsyncPixelTransferManagerEGL::SharedState::~SharedState() {} 685 AsyncPixelTransferManagerEGL::SharedState::~SharedState() {}
741 686
742 AsyncPixelTransferManagerEGL::AsyncPixelTransferManagerEGL() 687 AsyncPixelTransferManagerEGL::AsyncPixelTransferManagerEGL() {}
743 : delegate_(new AsyncPixelTransferDelegateEGL(&shared_state_)) {}
744 688
745 AsyncPixelTransferManagerEGL::~AsyncPixelTransferManagerEGL() {} 689 AsyncPixelTransferManagerEGL::~AsyncPixelTransferManagerEGL() {}
746 690
747 void AsyncPixelTransferManagerEGL::BindCompletedAsyncTransfers() { 691 void AsyncPixelTransferManagerEGL::BindCompletedAsyncTransfers() {
748 scoped_ptr<gfx::ScopedTextureBinder> texture_binder; 692 scoped_ptr<gfx::ScopedTextureBinder> texture_binder;
749 693
750 while(!shared_state_.pending_allocations.empty()) { 694 while(!shared_state_.pending_allocations.empty()) {
751 if (!shared_state_.pending_allocations.front().get()) { 695 if (!shared_state_.pending_allocations.front().get()) {
752 shared_state_.pending_allocations.pop_front(); 696 shared_state_.pending_allocations.pop_front();
753 continue; 697 continue;
754 } 698 }
755 scoped_refptr<TransferStateInternal> state = 699 AsyncPixelTransferDelegateEGL* delegate =
756 static_cast<AsyncTransferStateImpl*> 700 shared_state_.pending_allocations.front().get();
757 (shared_state_.pending_allocations.front().get())->internal_.get();
758 // Terminate early, as all transfers finish in order, currently. 701 // Terminate early, as all transfers finish in order, currently.
759 if (state->TransferIsInProgress()) 702 if (delegate->TransferIsInProgress())
760 break; 703 break;
761 704
762 if (!texture_binder) 705 if (!texture_binder)
763 texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0)); 706 texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));
764 707
765 // If the transfer is finished, bind it to the texture 708 // If the transfer is finished, bind it to the texture
766 // and remove it from pending list. 709 // and remove it from pending list.
767 state->BindTransfer(); 710 delegate->BindTransfer();
768 shared_state_.pending_allocations.pop_front(); 711 shared_state_.pending_allocations.pop_front();
769 } 712 }
770 } 713 }
771 714
772 void AsyncPixelTransferManagerEGL::AsyncNotifyCompletion( 715 void AsyncPixelTransferManagerEGL::AsyncNotifyCompletion(
773 const AsyncMemoryParams& mem_params, 716 const AsyncMemoryParams& mem_params,
774 const CompletionCallback& callback) { 717 const CompletionCallback& callback) {
775 DCHECK(mem_params.shared_memory); 718 DCHECK(mem_params.shared_memory);
776 DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size, 719 DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
777 mem_params.shm_size); 720 mem_params.shm_size);
(...skipping 21 matching lines...) Expand all
799 } 742 }
800 743
801 void AsyncPixelTransferManagerEGL::ProcessMorePendingTransfers() { 744 void AsyncPixelTransferManagerEGL::ProcessMorePendingTransfers() {
802 } 745 }
803 746
804 bool AsyncPixelTransferManagerEGL::NeedsProcessMorePendingTransfers() { 747 bool AsyncPixelTransferManagerEGL::NeedsProcessMorePendingTransfers() {
805 return false; 748 return false;
806 } 749 }
807 750
808 AsyncPixelTransferDelegate* 751 AsyncPixelTransferDelegate*
809 AsyncPixelTransferManagerEGL::GetAsyncPixelTransferDelegate() { 752 AsyncPixelTransferManagerEGL::CreatePixelTransferDelegateImpl(
810 return delegate_.get(); 753 gles2::TextureRef* ref,
754 const AsyncTexImage2DParams& define_params) {
755 return new AsyncPixelTransferDelegateEGL(
756 &shared_state_, ref->service_id(), define_params);
811 } 757 }
812 758
813 } // namespace gpu 759 } // namespace gpu
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698