Chromium Code Reviews
Diff: ui/gl/async_pixel_transfer_delegate_android.cc

Issue 12218088: gpu: Change qualcomm work-around. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Tweak | Created 7 years, 10 months ago
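For context on the work-around being changed: instead of the old explicit wait (a glFinish) after eglCreateImageKHR on Qualcomm, the patch asks EGL to preserve the texture contents by passing EGL_IMAGE_PRESERVED_KHR when the EGLImage is created, and pre-populates the texture with data on the affected GPUs. The sketch below condenses the new image-creation path from the hunks that follow; the helper name CreateEglImageForTexture and the raw EGL/GLES includes are illustrative only (inside Chromium the KHR entry points come from its own GL bindings).

// Illustrative sketch only, not part of the patch. Outside Chromium the KHR
// entry point must be provided by the EGL implementation; defining
// EGL_EGLEXT_PROTOTYPES exposes a direct prototype where it is exported.
#define EGL_EGLEXT_PROTOTYPES 1
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>

// Hypothetical free-function form of TransferStateInternal::CreateEglImage().
EGLImageKHR CreateEglImageForTexture(GLuint texture_id,
                                     bool use_image_preserved) {
  // The patch sets use_image_preserved for Qualcomm and Imagination so the
  // data uploaded before image creation is kept; other vendors pass EGL_FALSE.
  EGLint image_preserved = use_image_preserved ? EGL_TRUE : EGL_FALSE;
  EGLint egl_attrib_list[] = {
      EGL_GL_TEXTURE_LEVEL_KHR, 0,               // mip-level to reference.
      EGL_IMAGE_PRESERVED_KHR, image_preserved,  // keep texture data.
      EGL_NONE
  };
  return eglCreateImageKHR(eglGetCurrentDisplay(),
                           eglGetCurrentContext(),
                           EGL_GL_TEXTURE_2D_KHR,
                           reinterpret_cast<EGLClientBuffer>(texture_id),
                           egl_attrib_list);
}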
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ui/gl/async_pixel_transfer_delegate_android.h"

#include <string>

#include "base/bind.h"
#include "base/debug/trace_event.h"
(...skipping 34 matching lines...)
  }
  while ((glerror = glGetError()) != GL_NO_ERROR) {
    LOG(ERROR) << "Async transfer OpenGL error at "
               << file << ":" << line << " " << glerror;
    success = false;
  }
  return success;
}
#define CHECK_GL() CheckErrors(__FILE__, __LINE__)

+// Regular glTexImage2D call.
+void DoTexImage2D(const AsyncTexImage2DParams& tex_params, void* data) {
+  glTexImage2D(
+      GL_TEXTURE_2D, tex_params.level, tex_params.internal_format,
+      tex_params.width, tex_params.height,
+      tex_params.border, tex_params.format, tex_params.type, data);
+}
+
+// Regular glTexSubImage2D call.
+void DoTexSubImage2D(const AsyncTexSubImage2DParams& tex_params, void* data) {
+  glTexSubImage2D(
+      GL_TEXTURE_2D, tex_params.level,
+      tex_params.xoffset, tex_params.yoffset,
+      tex_params.width, tex_params.height,
+      tex_params.format, tex_params.type, data);
+}
+
+// Full glTexSubImage2D call, from glTexImage2D params.
+void DoFullTexSubImage2D(const AsyncTexImage2DParams& tex_params, void* data) {
+  glTexSubImage2D(
+      GL_TEXTURE_2D, tex_params.level,
+      0, 0, tex_params.width, tex_params.height,
+      tex_params.format, tex_params.type, data);
+}
+
+
// We duplicate shared memory to avoid use-after-free issues. This could also
// be solved by ref-counting something, or with a destruction callback. There
// wasn't an obvious hook or ref-counted container, so for now we dup/mmap.
SharedMemory* DuplicateSharedMemory(SharedMemory* shared_memory, uint32 size) {
  // Duplicate the handle.
  SharedMemoryHandle duped_shared_memory_handle;
  if (!shared_memory->ShareToProcess(
      base::GetCurrentProcessHandle(),
      &duped_shared_memory_handle)) {
    CHECK(false);  // Diagnosing a crash.
(...skipping 66 matching lines...)
}  // namespace

// Class which holds async pixel transfers state (EGLImage).
// The EGLImage is accessed by either thread, but everything
// else accessed only on the main thread.
class TransferStateInternal
    : public base::RefCountedThreadSafe<TransferStateInternal> {
 public:
  explicit TransferStateInternal(GLuint texture_id,
                                 bool wait_for_uploads,
-                                bool wait_for_egl_images)
+                                bool use_image_preserved)
      : texture_id_(texture_id),
        thread_texture_id_(0),
        needs_late_bind_(false),
        transfer_in_progress_(false),
        egl_image_(EGL_NO_IMAGE_KHR),
        wait_for_uploads_(wait_for_uploads),
-        wait_for_egl_images_(wait_for_egl_images) {
+        use_image_preserved_(use_image_preserved) {
    static const AsyncTexImage2DParams zero_params = {0, 0, 0, 0, 0, 0, 0, 0};
    late_bind_define_params_ = zero_params;
  }

  // Implement AsyncPixelTransferState:
  bool TransferIsInProgress() {
    return transfer_in_progress_;
  }

  void BindTransfer(AsyncTexImage2DParams* bound_params) {
(...skipping 21 matching lines...)
  void CreateEglImage(GLuint texture_id) {
    TRACE_EVENT0("gpu", "eglCreateImageKHR");
    DCHECK(texture_id);
    DCHECK_EQ(egl_image_, EGL_NO_IMAGE_KHR);

    EGLDisplay egl_display = eglGetCurrentDisplay();
    EGLContext egl_context = eglGetCurrentContext();
    EGLenum egl_target = EGL_GL_TEXTURE_2D_KHR;
    EGLClientBuffer egl_buffer =
        reinterpret_cast<EGLClientBuffer>(texture_id);
+
+    EGLint image_preserved = use_image_preserved_ ? EGL_TRUE : EGL_FALSE;
    EGLint egl_attrib_list[] = {
-        EGL_GL_TEXTURE_LEVEL_KHR, 0,  // mip-level to reference.
-        EGL_IMAGE_PRESERVED_KHR, EGL_FALSE,  // throw away texture data.
+        EGL_GL_TEXTURE_LEVEL_KHR, 0,  // mip-level.
+        EGL_IMAGE_PRESERVED_KHR, image_preserved,
        EGL_NONE
    };
    egl_image_ = eglCreateImageKHR(
        egl_display,
        egl_context,
        egl_target,
        egl_buffer,
        egl_attrib_list);

    DCHECK_NE(EGL_NO_IMAGE_KHR, egl_image_);
  }

  void CreateEglImageOnUploadThread() {
    CreateEglImage(thread_texture_id_);
  }

  void CreateEglImageOnMainThreadIfNeeded() {
    if (egl_image_ == EGL_NO_IMAGE_KHR)
      CreateEglImage(texture_id_);
  }

-  void WaitOnEglImageCreation() {
-    // On Qualcomm, we need to wait on egl image creation before
-    // doing the first upload (or the texture remains black).
-    // A fence after image creation didn't always work, but glFinish
-    // seems to always work, and this only happens on the upload thread.
-    if (wait_for_egl_images_) {
-      TRACE_EVENT0("gpu", "glFinish");
-      glFinish();
-    }
-  }
-
  void WaitForLastUpload() {
    // This glFinish is just a safe-guard for if uploads have some
    // GPU action that needs to occur. We could use fences and try
    // to do this less often. However, on older drivers fences are
    // not always reliable (eg. Mali-400 just blocks forever).
    if (wait_for_uploads_) {
      TRACE_EVENT0("gpu", "glFinish");
      glFinish();
    }
  }
(...skipping 36 matching lines...)
  // It would be nice if we could just create a new EGLImage for
  // every upload, but I found that didn't work, so this stores
  // one for the lifetime of the texture.
  EGLImageKHR egl_image_;

  // Time spent performing last transfer.
  base::TimeDelta last_transfer_time_;

  // Customize when we block on fences (these are work-arounds).
  bool wait_for_uploads_;
-  bool wait_for_egl_images_;
+  bool use_image_preserved_;
};

// Android needs thread-safe ref-counting, so this just wraps
// an internal thread-safe ref-counted state object.
class AsyncTransferStateAndroid : public AsyncPixelTransferState {
 public:
  explicit AsyncTransferStateAndroid(GLuint texture_id,
                                     bool wait_for_uploads,
-                                    bool wait_for_egl_images)
+                                    bool use_image_preserved)
      : internal_(new TransferStateInternal(texture_id,
                                            wait_for_uploads,
-                                           wait_for_egl_images)) {
+                                           use_image_preserved)) {
  }
  virtual ~AsyncTransferStateAndroid() {}
  virtual bool TransferIsInProgress() {
    return internal_->TransferIsInProgress();
  }
  virtual void BindTransfer(AsyncTexImage2DParams* bound_params) {
    internal_->BindTransfer(bound_params);
  }
  scoped_refptr<TransferStateInternal> internal_;
};
(...skipping 100 matching lines...)
}

AsyncPixelTransferDelegateAndroid::~AsyncPixelTransferDelegateAndroid() {
}

AsyncPixelTransferState*
    AsyncPixelTransferDelegateAndroid::
        CreateRawPixelTransferState(GLuint texture_id) {

  // We can't wait on uploads on imagination (it can take 200ms+).
+  // In practice, they are complete when the CPU glSubTexImage2D completes.
  bool wait_for_uploads = !is_imagination_;

-  // We need to wait for EGLImage creation on Qualcomm.
-  bool wait_for_egl_images = is_qualcomm_;
+  // Qualcomm has a race when using image_preserved=FALSE,
+  // which can result in black textures even after the first upload.
+  // Since using FALSE is mainly a performance optimization (to avoid layout
+  // changes), and Qualcomm doesn't seem to get any performance benefit from
+  // it anyway, we just use image_preserved=TRUE on Qualcomm as a work-around.
+  bool use_image_preserved = is_qualcomm_ || is_imagination_;

  return static_cast<AsyncPixelTransferState*>(
      new AsyncTransferStateAndroid(texture_id,
                                    wait_for_uploads,
-                                   wait_for_egl_images));
+                                   use_image_preserved));
}
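A note for reviewers new to this file: is_qualcomm_ and is_imagination_ are set outside the hunks shown here. A hypothetical sketch of how such flags are commonly derived from the GL vendor string follows (the names VendorFlags and DetectGpuVendor are illustrative, not from the patch).

// Hypothetical sketch; the real detection lives outside this diff.
#include <string>
#include <GLES2/gl2.h>

struct VendorFlags {
  bool is_qualcomm;
  bool is_imagination;
};

VendorFlags DetectGpuVendor() {
  // glGetString needs a current GL context; it may return NULL otherwise.
  const char* vendor_cstr =
      reinterpret_cast<const char*>(glGetString(GL_VENDOR));
  std::string vendor = vendor_cstr ? vendor_cstr : "";
  VendorFlags flags;
  flags.is_qualcomm = vendor.find("Qualcomm") != std::string::npos;
  flags.is_imagination = vendor.find("Imagination") != std::string::npos;
  return flags;
}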

namespace {
// Dummy function to measure completion on
// the upload thread.
void NoOp() {}
}  // namespace

void AsyncPixelTransferDelegateAndroid::AsyncNotifyCompletion(
    const base::Closure& task) {
(...skipping 138 matching lines...)

  void* data = GetAddress(shared_memory, shared_memory_data_offset);
  {
    TRACE_EVENT0("gpu", "glTexImage2D no data");
    glGenTextures(1, &state->thread_texture_id_);
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, state->thread_texture_id_);

    SetGlParametersForEglImageTexture();

-    // Allocate first, so we can create the EGLImage without
-    // EGL_IMAGE_PRESERVED, which can be costly.
-    glTexImage2D(
-        GL_TEXTURE_2D,
-        tex_params.level,
-        tex_params.internal_format,
-        tex_params.width,
-        tex_params.height,
-        tex_params.border,
-        tex_params.format,
-        tex_params.type,
-        NULL);
+    // If we need to use image_preserved, we pass the data with
+    // the allocation. Otherwise we use a NULL allocation to
+    // try to avoid any costs associated with creating the EGLImage.
+    if (state->use_image_preserved_)
+      DoTexImage2D(tex_params, data);
+    else
+      DoTexImage2D(tex_params, NULL);
  }

  state->CreateEglImageOnUploadThread();
-  state->WaitOnEglImageCreation();

  {
    TRACE_EVENT0("gpu", "glTexSubImage2D with data");
-    glTexSubImage2D(
-        GL_TEXTURE_2D,
-        tex_params.level,
-        0,
-        0,
-        tex_params.width,
-        tex_params.height,
-        tex_params.format,
-        tex_params.type,
-        data);
+
+    // If we didn't use image_preserved, we haven't uploaded
+    // the data yet, so we do this with a full texSubImage.
+    if (!state->use_image_preserved_)
+      DoFullTexSubImage2D(tex_params, data);
  }

  state->WaitForLastUpload();
  DCHECK(CHECK_GL());
}

void AsyncPixelTransferDelegateAndroid::PerformAsyncTexSubImage2D(
    TransferStateInternal* state,
    AsyncTexSubImage2DParams tex_params,
    base::SharedMemory* shared_memory,
    uint32 shared_memory_data_offset) {
  TRACE_EVENT2("gpu", "PerformAsyncTexSubImage2D",
               "width", tex_params.width,
               "height", tex_params.height);

  DCHECK(state);
  DCHECK_NE(EGL_NO_IMAGE_KHR, state->egl_image_);
  DCHECK_EQ(0, tex_params.level);

-  state->WaitOnEglImageCreation();
-
  void* data = GetAddress(shared_memory, shared_memory_data_offset);

  base::TimeTicks begin_time(base::TimeTicks::HighResNow());
  if (!state->thread_texture_id_) {
    TRACE_EVENT0("gpu", "glEGLImageTargetTexture2DOES");
    glGenTextures(1, &state->thread_texture_id_);
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, state->thread_texture_id_);
    glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, state->egl_image_);
  } else {
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, state->thread_texture_id_);
  }
  {
    TRACE_EVENT0("gpu", "glTexSubImage2D");
-    glTexSubImage2D(
-        GL_TEXTURE_2D,
-        tex_params.level,
-        tex_params.xoffset,
-        tex_params.yoffset,
-        tex_params.width,
-        tex_params.height,
-        tex_params.format,
-        tex_params.type,
-        data);
+    DoTexSubImage2D(tex_params, data);
  }
  state->WaitForLastUpload();

  DCHECK(CHECK_GL());
  state->last_transfer_time_ = base::TimeTicks::HighResNow() - begin_time;
}

namespace {
bool IsPowerOfTwo (unsigned int x) {
  return ((x != 0) && !(x & (x - 1)));
}

bool IsMultipleOfEight(unsigned int x) {
  return (x & 7) == 0;
}

bool DimensionsSupportImgFastPath(int width, int height) {
  // Multiple of eight, but not a power of two.
  return IsMultipleOfEight(width) &&
         IsMultipleOfEight(height) &&
-         !IsPowerOfTwo(width) &&
-         !IsPowerOfTwo(height);
+         !(IsPowerOfTwo(width) &&
+           IsPowerOfTwo(height));
}
}  // namespace

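Aside on the tweak to DimensionsSupportImgFastPath just above: the old expression required that neither dimension be a power of two, while the new one only rejects textures whose dimensions are both powers of two, so mixed cases such as 256x240 now qualify for the Imagination fast path. A small standalone check follows; the two helpers are copied from this hunk, while OldFastPath, NewFastPath, and main are illustrative only.

#include <cassert>

namespace {
bool IsPowerOfTwo(unsigned int x) { return (x != 0) && !(x & (x - 1)); }
bool IsMultipleOfEight(unsigned int x) { return (x & 7) == 0; }

// Old predicate: neither dimension may be a power of two.
bool OldFastPath(int w, int h) {
  return IsMultipleOfEight(w) && IsMultipleOfEight(h) &&
         !IsPowerOfTwo(w) && !IsPowerOfTwo(h);
}

// New predicate: only reject when both dimensions are powers of two.
bool NewFastPath(int w, int h) {
  return IsMultipleOfEight(w) && IsMultipleOfEight(h) &&
         !(IsPowerOfTwo(w) && IsPowerOfTwo(h));
}
}  // namespace

int main() {
  assert(!OldFastPath(256, 240));  // Width is a power of two: old path says no.
  assert(NewFastPath(256, 240));   // New path accepts it (height is not).
  assert(!OldFastPath(256, 256));  // Both powers of two: rejected either way.
  assert(!NewFastPath(256, 256));
  return 0;
}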
// It is very difficult to stream uploads on Imagination GPUs:
// - glTexImage2D defers a swizzle/stall until draw-time
// - glTexSubImage2D will sleep for 16ms on a good day, and 100ms
//   or longer if OpenGL is in heavy use by another thread.
// The one combination that avoids these problems requires:
// a.) Allocations/Uploads must occur on different threads/contexts.
// b.) Texture size must be non-power-of-two.
(...skipping 12 matching lines...)
    return false;

  // On imagination we allocate synchronously all the time, even
  // if the dimensions support fast uploads. This is for part a.)
  // above, so allocations occur on a different thread/context as uploads.
  void* data = GetAddress(mem_params.shared_memory, mem_params.shm_data_offset);
  SetGlParametersForEglImageTexture();

  {
    TRACE_EVENT0("gpu", "glTexImage2D with data");
-    glTexImage2D(
-        GL_TEXTURE_2D,
-        tex_params.level,
-        tex_params.internal_format,
-        tex_params.width,
-        tex_params.height,
-        tex_params.border,
-        tex_params.format,
-        tex_params.type,
-        data);
+    DoTexImage2D(tex_params, data);
  }

  // The allocation has already occured, so mark it as finished
  // and ready for binding.
  state->needs_late_bind_ = false;
  state->transfer_in_progress_ = false;
  state->late_bind_define_params_ = tex_params;

  // If the dimensions support fast async uploads, create the
  // EGLImage for future uploads. The late bind should not
(...skipping 19 matching lines...)
  // normal async upload path for uploads.
  if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height))
    return false;

  // Fall back on a synchronous stub as we don't have a known fast path.
  void* data = GetAddress(mem_params.shared_memory,
                          mem_params.shm_data_offset);
  base::TimeTicks begin_time(base::TimeTicks::HighResNow());
  {
    TRACE_EVENT0("gpu", "glTexSubImage2D");
-    glTexSubImage2D(
-        tex_params.target,
-        tex_params.level,
-        tex_params.xoffset,
-        tex_params.yoffset,
-        tex_params.width,
-        tex_params.height,
-        tex_params.format,
-        tex_params.type,
-        data);
+    DoTexSubImage2D(tex_params, data);
  }
  texture_upload_count_++;
  total_texture_upload_time_ += base::TimeTicks::HighResNow() - begin_time;

  DCHECK(CHECK_GL());
  return true;
}

}  // namespace gfx