Chromium Code Reviews

Side by Side Diff: gpu/command_buffer/service/in_process_command_buffer.cc

Issue 1331843005: Implemented new fence syncs, which replace the old sync points. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fixed memory leak Created 5 years, 2 months ago
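For orientation before the diff: the patch tracks fence sync releases with two new counters on InProcessCommandBuffer, next_fence_sync_release_ (the next release number to hand out, starting at 1) and flushed_fence_sync_release_ (the highest release covered by a Flush). The following is a minimal standalone C++ sketch of that bookkeeping with names mirroring the diff below; it is an illustration only, since the real class adds locking, the GPU-thread release/wait callbacks, and SyncPointClient plumbing.

#include <cstdint>

// Standalone sketch (an assumption for illustration, not the patch itself) of
// the release-number bookkeeping added in this CL.
class FenceSyncBookkeeping {
 public:
  // Hands out release numbers starting at 1; 0 stays invalid.
  uint64_t GenerateFenceSyncRelease() { return next_fence_sync_release_++; }

  // A release number is valid once it has been generated.
  bool IsFenceSyncRelease(uint64_t release) const {
    return release != 0 && release < next_fence_sync_release_;
  }

  // A release counts as flushed once a Flush() call has covered it.
  bool IsFenceSyncFlushed(uint64_t release) const {
    return release <= flushed_fence_sync_release_;
  }

  // Mirrors the end of InProcessCommandBuffer::Flush(): everything generated
  // so far is now considered flushed.
  void OnFlush() { flushed_fence_sync_release_ = next_fence_sync_release_ - 1; }

 private:
  uint64_t next_fence_sync_release_ = 1;
  uint64_t flushed_fence_sync_release_ = 0;
};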
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "gpu/command_buffer/service/in_process_command_buffer.h" 5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
6 6
7 #include <queue> 7 #include <queue>
8 #include <set> 8 #include <set>
9 #include <utility> 9 #include <utility>
10 10
11 #include "base/bind.h" 11 #include "base/bind.h"
12 #include "base/bind_helpers.h" 12 #include "base/bind_helpers.h"
13 #include "base/command_line.h" 13 #include "base/command_line.h"
14 #include "base/lazy_instance.h" 14 #include "base/lazy_instance.h"
15 #include "base/location.h" 15 #include "base/location.h"
16 #include "base/logging.h" 16 #include "base/logging.h"
17 #include "base/memory/weak_ptr.h" 17 #include "base/memory/weak_ptr.h"
18 #include "base/sequence_checker.h" 18 #include "base/sequence_checker.h"
19 #include "base/single_thread_task_runner.h" 19 #include "base/single_thread_task_runner.h"
20 #include "base/thread_task_runner_handle.h" 20 #include "base/thread_task_runner_handle.h"
21 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" 21 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
22 #include "gpu/command_buffer/common/gles2_cmd_format.h"
22 #include "gpu/command_buffer/common/value_state.h" 23 #include "gpu/command_buffer/common/value_state.h"
23 #include "gpu/command_buffer/service/command_buffer_service.h" 24 #include "gpu/command_buffer/service/command_buffer_service.h"
24 #include "gpu/command_buffer/service/context_group.h" 25 #include "gpu/command_buffer/service/context_group.h"
25 #include "gpu/command_buffer/service/gl_context_virtual.h" 26 #include "gpu/command_buffer/service/gl_context_virtual.h"
26 #include "gpu/command_buffer/service/gpu_scheduler.h" 27 #include "gpu/command_buffer/service/gpu_scheduler.h"
27 #include "gpu/command_buffer/service/gpu_switches.h" 28 #include "gpu/command_buffer/service/gpu_switches.h"
28 #include "gpu/command_buffer/service/image_factory.h" 29 #include "gpu/command_buffer/service/image_factory.h"
29 #include "gpu/command_buffer/service/image_manager.h" 30 #include "gpu/command_buffer/service/image_manager.h"
30 #include "gpu/command_buffer/service/mailbox_manager.h" 31 #include "gpu/command_buffer/service/mailbox_manager.h"
31 #include "gpu/command_buffer/service/memory_program_cache.h" 32 #include "gpu/command_buffer/service/memory_program_cache.h"
(...skipping 143 matching lines...)
175 } 176 }
176 177
177 InProcessCommandBuffer::InProcessCommandBuffer( 178 InProcessCommandBuffer::InProcessCommandBuffer(
178 const scoped_refptr<Service>& service) 179 const scoped_refptr<Service>& service)
179 : command_buffer_id_(g_next_command_buffer_id.GetNext()), 180 : command_buffer_id_(g_next_command_buffer_id.GetNext()),
180 context_lost_(false), 181 context_lost_(false),
181 delayed_work_pending_(false), 182 delayed_work_pending_(false),
182 image_factory_(nullptr), 183 image_factory_(nullptr),
183 last_put_offset_(-1), 184 last_put_offset_(-1),
184 gpu_memory_buffer_manager_(nullptr), 185 gpu_memory_buffer_manager_(nullptr),
186 next_fence_sync_release_(1),
187 flushed_fence_sync_release_(0),
185 flush_event_(false, false), 188 flush_event_(false, false),
186 service_(GetInitialService(service)), 189 service_(GetInitialService(service)),
190 fence_sync_wait_event_(false, false),
187 gpu_thread_weak_ptr_factory_(this) { 191 gpu_thread_weak_ptr_factory_(this) {
188 DCHECK(service_.get()); 192 DCHECK(service_.get());
189 next_image_id_.GetNext(); 193 next_image_id_.GetNext();
190 } 194 }
191 195
192 InProcessCommandBuffer::~InProcessCommandBuffer() { 196 InProcessCommandBuffer::~InProcessCommandBuffer() {
193 Destroy(); 197 Destroy();
194 } 198 }
195 199
196 void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) { 200 void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
(...skipping 140 matching lines...)
337 else 341 else
338 surface_ = gfx::GLSurface::CreateViewGLSurface(params.window); 342 surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
339 } 343 }
340 344
341 if (!surface_.get()) { 345 if (!surface_.get()) {
342 LOG(ERROR) << "Could not create GLSurface."; 346 LOG(ERROR) << "Could not create GLSurface.";
343 DestroyOnGpuThread(); 347 DestroyOnGpuThread();
344 return false; 348 return false;
345 } 349 }
346 350
347 sync_point_client_state_ = SyncPointClientState::Create(); 351 sync_point_order_data_ = SyncPointOrderData::Create();
348 sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient( 352 sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient(
349 sync_point_client_state_, 353 sync_point_order_data_, GetNamespaceID(), GetCommandBufferID());
350 GetNamespaceID(), GetCommandBufferID());
351 354
352 if (service_->UseVirtualizedGLContexts() || 355 if (service_->UseVirtualizedGLContexts() ||
353 decoder_->GetContextGroup() 356 decoder_->GetContextGroup()
354 ->feature_info() 357 ->feature_info()
355 ->UseVirtualizedGLContexts()) { 358 ->UseVirtualizedGLContexts()) {
356 context_ = gl_share_group_->GetSharedContext(); 359 context_ = gl_share_group_->GetSharedContext();
357 if (!context_.get()) { 360 if (!context_.get()) {
358 context_ = gfx::GLContext::CreateGLContext( 361 context_ = gfx::GLContext::CreateGLContext(
359 gl_share_group_.get(), surface_.get(), params.gpu_preference); 362 gl_share_group_.get(), surface_.get(), params.gpu_preference);
360 gl_share_group_->SetSharedContext(context_.get()); 363 gl_share_group_->SetSharedContext(context_.get());
(...skipping 45 matching lines...)
406 } 409 }
407 *params.capabilities = decoder_->GetCapabilities(); 410 *params.capabilities = decoder_->GetCapabilities();
408 411
409 if (!params.is_offscreen) { 412 if (!params.is_offscreen) {
410 decoder_->SetResizeCallback(base::Bind( 413 decoder_->SetResizeCallback(base::Bind(
411 &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_)); 414 &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
412 } 415 }
413 decoder_->SetWaitSyncPointCallback( 416 decoder_->SetWaitSyncPointCallback(
414 base::Bind(&InProcessCommandBuffer::WaitSyncPointOnGpuThread, 417 base::Bind(&InProcessCommandBuffer::WaitSyncPointOnGpuThread,
415 base::Unretained(this))); 418 base::Unretained(this)));
419 decoder_->SetFenceSyncReleaseCallback(
420 base::Bind(&InProcessCommandBuffer::FenceSyncReleaseOnGpuThread,
421 base::Unretained(this)));
422 decoder_->SetWaitFenceSyncCallback(
423 base::Bind(&InProcessCommandBuffer::WaitFenceSyncOnGpuThread,
424 base::Unretained(this)));
416 425
417 image_factory_ = params.image_factory; 426 image_factory_ = params.image_factory;
418 427
419 return true; 428 return true;
420 } 429 }
421 430
422 void InProcessCommandBuffer::Destroy() { 431 void InProcessCommandBuffer::Destroy() {
423 CheckSequencedThread(); 432 CheckSequencedThread();
424 433
425 base::WaitableEvent completion(true, false); 434 base::WaitableEvent completion(true, false);
(...skipping 11 matching lines...)
437 command_buffer_.reset(); 446 command_buffer_.reset();
438 // Clean up GL resources if possible. 447 // Clean up GL resources if possible.
439 bool have_context = context_.get() && context_->MakeCurrent(surface_.get()); 448 bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
440 if (decoder_) { 449 if (decoder_) {
441 decoder_->Destroy(have_context); 450 decoder_->Destroy(have_context);
442 decoder_.reset(); 451 decoder_.reset();
443 } 452 }
444 context_ = NULL; 453 context_ = NULL;
445 surface_ = NULL; 454 surface_ = NULL;
446 sync_point_client_ = NULL; 455 sync_point_client_ = NULL;
447 sync_point_client_state_ = NULL; 456 if (sync_point_order_data_) {
457 sync_point_order_data_->Destroy();
458 sync_point_order_data_ = nullptr;
459 }
448 gl_share_group_ = NULL; 460 gl_share_group_ = NULL;
449 #if defined(OS_ANDROID) 461 #if defined(OS_ANDROID)
450 stream_texture_manager_.reset(); 462 stream_texture_manager_.reset();
451 #endif 463 #endif
452 464
453 return true; 465 return true;
454 } 466 }
455 467
456 void InProcessCommandBuffer::CheckSequencedThread() { 468 void InProcessCommandBuffer::CheckSequencedThread() {
457 DCHECK(!sequence_checker_ || 469 DCHECK(!sequence_checker_ ||
(...skipping 28 matching lines...)
486 GetStateFast(); 498 GetStateFast();
487 return last_state_.token; 499 return last_state_.token;
488 } 500 }
489 501
490 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset, 502 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset,
491 uint32_t order_num) { 503 uint32_t order_num) {
492 CheckSequencedThread(); 504 CheckSequencedThread();
493 ScopedEvent handle_flush(&flush_event_); 505 ScopedEvent handle_flush(&flush_event_);
494 base::AutoLock lock(command_buffer_lock_); 506 base::AutoLock lock(command_buffer_lock_);
495 507
496 sync_point_client_state_->BeginProcessingOrderNumber(order_num); 508 sync_point_order_data_->BeginProcessingOrderNumber(order_num);
497 command_buffer_->Flush(put_offset); 509 command_buffer_->Flush(put_offset);
498 { 510 {
499 // Update state before signaling the flush event. 511 // Update state before signaling the flush event.
500 base::AutoLock lock(state_after_last_flush_lock_); 512 base::AutoLock lock(state_after_last_flush_lock_);
501 state_after_last_flush_ = command_buffer_->GetLastState(); 513 state_after_last_flush_ = command_buffer_->GetLastState();
502 } 514 }
503 DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) || 515 DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
504 (error::IsError(state_after_last_flush_.error) && context_lost_)); 516 (error::IsError(state_after_last_flush_.error) && context_lost_));
505 517
506 // Currently the in process command buffer does not support being descheduled, 518 // Currently the in process command buffer does not support being descheduled,
507 // if it does we would need to back off on calling the finish processing 519 // if it does we would need to back off on calling the finish processing
508 // order number function until the message is rescheduled and finished 520 // order number function until the message is rescheduled and finished
509 // processing. This DCHECK is to enforce this. 521 // processing. This DCHECK is to enforce this.
510 DCHECK(context_lost_ || put_offset == state_after_last_flush_.get_offset); 522 DCHECK(context_lost_ || put_offset == state_after_last_flush_.get_offset);
511 sync_point_client_state_->FinishProcessingOrderNumber(order_num); 523 sync_point_order_data_->FinishProcessingOrderNumber(order_num);
512 524
513 // If we've processed all pending commands but still have pending queries, 525 // If we've processed all pending commands but still have pending queries,
514 // pump idle work until the query is passed. 526 // pump idle work until the query is passed.
515 if (put_offset == state_after_last_flush_.get_offset && 527 if (put_offset == state_after_last_flush_.get_offset &&
516 (gpu_scheduler_->HasMoreIdleWork() || 528 (gpu_scheduler_->HasMoreIdleWork() ||
517 gpu_scheduler_->HasPendingQueries())) { 529 gpu_scheduler_->HasPendingQueries())) {
518 ScheduleDelayedWorkOnGpuThread(); 530 ScheduleDelayedWorkOnGpuThread();
519 } 531 }
520 } 532 }
521 533
(...skipping 23 matching lines...)
545 void InProcessCommandBuffer::Flush(int32 put_offset) { 557 void InProcessCommandBuffer::Flush(int32 put_offset) {
546 CheckSequencedThread(); 558 CheckSequencedThread();
547 if (last_state_.error != gpu::error::kNoError) 559 if (last_state_.error != gpu::error::kNoError)
548 return; 560 return;
549 561
550 if (last_put_offset_ == put_offset) 562 if (last_put_offset_ == put_offset)
551 return; 563 return;
552 564
553 SyncPointManager* sync_manager = service_->sync_point_manager(); 565 SyncPointManager* sync_manager = service_->sync_point_manager();
554 const uint32_t order_num = 566 const uint32_t order_num =
555 sync_point_client_state_->GenerateUnprocessedOrderNumber(sync_manager); 567 sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
556 last_put_offset_ = put_offset; 568 last_put_offset_ = put_offset;
557 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, 569 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
558 gpu_thread_weak_ptr_, 570 gpu_thread_weak_ptr_,
559 put_offset, 571 put_offset,
560 order_num); 572 order_num);
561 QueueTask(task); 573 QueueTask(task);
574
575 flushed_fence_sync_release_ = next_fence_sync_release_ - 1;
562 } 576 }
563 577
564 void InProcessCommandBuffer::OrderingBarrier(int32 put_offset) { 578 void InProcessCommandBuffer::OrderingBarrier(int32 put_offset) {
565 Flush(put_offset); 579 Flush(put_offset);
566 } 580 }
567 581
568 void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) { 582 void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
569 CheckSequencedThread(); 583 CheckSequencedThread();
570 while (!InRange(start, end, GetLastToken()) && 584 while (!InRange(start, end, GetLastToken()) &&
571 last_state_.error == gpu::error::kNoError) 585 last_state_.error == gpu::error::kNoError)
(...skipping 218 matching lines...)
790 804
791 void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) { 805 void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
792 gles2::MailboxManager* mailbox_manager = 806 gles2::MailboxManager* mailbox_manager =
793 decoder_->GetContextGroup()->mailbox_manager(); 807 decoder_->GetContextGroup()->mailbox_manager();
794 if (mailbox_manager->UsesSync()) { 808 if (mailbox_manager->UsesSync()) {
795 bool make_current_success = false; 809 bool make_current_success = false;
796 { 810 {
797 base::AutoLock lock(command_buffer_lock_); 811 base::AutoLock lock(command_buffer_lock_);
798 make_current_success = MakeCurrent(); 812 make_current_success = MakeCurrent();
799 } 813 }
800 if (make_current_success) 814 if (make_current_success) {
801 mailbox_manager->PushTextureUpdates(sync_point); 815 // Old sync points are global and do not have a command buffer ID,
816 // We can simply use the GPUIO namespace with 0 for the command buffer ID
817 // (under normal circumstances 0 is invalid so will not be used) until
818 // the old sync points are replaced.
819 gles2::SyncToken sync_token = {
820 gpu::CommandBufferNamespace::GPU_IO,
821 0,
822 sync_point
823 };
824 mailbox_manager->PushTextureUpdates(sync_token);
825 }
802 } 826 }
803 service_->sync_point_manager()->RetireSyncPoint(sync_point); 827 service_->sync_point_manager()->RetireSyncPoint(sync_point);
804 } 828 }
805 829
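The comment in RetireSyncPointOnGpuThread above (and the identical one in WaitSyncPointOnGpuThread below) describes a transition shim: an old-style global sync point is wrapped in a SyncToken under the GPU_IO namespace with command buffer ID 0, which is otherwise invalid. As a hypothetical helper, not part of this patch, the shim amounts to the following (it relies on the gles2_cmd_format.h include added in this CL):

// Hypothetical helper (not in this CL) capturing the shim described above:
// old sync points are global, so wrap them in a SyncToken with the GPU_IO
// namespace and the otherwise-invalid command buffer ID 0.
gles2::SyncToken SyncTokenForOldSyncPoint(uint32 sync_point) {
  gles2::SyncToken sync_token = {gpu::CommandBufferNamespace::GPU_IO, 0,
                                 sync_point};
  return sync_token;
}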
806 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point, 830 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
807 const base::Closure& callback) { 831 const base::Closure& callback) {
808 CheckSequencedThread(); 832 CheckSequencedThread();
809 QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread, 833 QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
810 base::Unretained(this), 834 base::Unretained(this),
811 sync_point, 835 sync_point,
812 WrapCallback(callback))); 836 WrapCallback(callback)));
813 } 837 }
814 838
815 bool InProcessCommandBuffer::WaitSyncPointOnGpuThread(unsigned sync_point) { 839 bool InProcessCommandBuffer::WaitSyncPointOnGpuThread(unsigned sync_point) {
816 service_->sync_point_manager()->WaitSyncPoint(sync_point); 840 service_->sync_point_manager()->WaitSyncPoint(sync_point);
817 gles2::MailboxManager* mailbox_manager = 841 gles2::MailboxManager* mailbox_manager =
818 decoder_->GetContextGroup()->mailbox_manager(); 842 decoder_->GetContextGroup()->mailbox_manager();
819 mailbox_manager->PullTextureUpdates(sync_point); 843 // Old sync points are global and do not have a command buffer ID,
844 // We can simply use the GPUIO namespace with 0 for the command buffer ID
845 // (under normal circumstances 0 is invalid so will not be used) until
846 // the old sync points are replaced.
847 gles2::SyncToken sync_token = {
848 gpu::CommandBufferNamespace::GPU_IO,
849 0,
850 sync_point
851 };
852 mailbox_manager->PullTextureUpdates(sync_token);
820 return true; 853 return true;
821 } 854 }
822 855
856 void InProcessCommandBuffer::FenceSyncReleaseOnGpuThread(uint64_t release) {
857 if (!sync_point_client_->client_state()->IsFenceSyncReleased(release)) {
piman 2015/09/30 22:50:23 Can you make this a DCHECK? For the in-process cas
David Yen 2015/09/30 23:55:18 Done.
858 gles2::MailboxManager* mailbox_manager =
859 decoder_->GetContextGroup()->mailbox_manager();
860 if (mailbox_manager->UsesSync()) {
861 bool make_current_success = false;
862 {
863 base::AutoLock lock(command_buffer_lock_);
864 make_current_success = MakeCurrent();
865 }
866 if (make_current_success) {
867 gles2::SyncToken sync_token = {
868 GetNamespaceID(),
869 GetCommandBufferID(),
870 release
871 };
872 mailbox_manager->PushTextureUpdates(sync_token);
873 }
874 }
875
876 sync_point_client_->ReleaseFenceSync(release);
877 }
878 }
879
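Per the exchange above, piman asked for the release check to become a DCHECK, and the "Done" lands in a later patch set that is not shown here. The following is only a sketch of what that version presumably looks like, reusing the code from this hunk with the if swapped for a DCHECK; it is an assumption, not the landed code.

void InProcessCommandBuffer::FenceSyncReleaseOnGpuThread(uint64_t release) {
  // In-process releases are generated and processed in order on one GPU
  // thread, so an already-released fence would indicate a bug rather than a
  // benign race.
  DCHECK(!sync_point_client_->client_state()->IsFenceSyncReleased(release));

  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  if (mailbox_manager->UsesSync()) {
    bool make_current_success = false;
    {
      base::AutoLock lock(command_buffer_lock_);
      make_current_success = MakeCurrent();
    }
    if (make_current_success) {
      gles2::SyncToken sync_token = {GetNamespaceID(), GetCommandBufferID(),
                                     release};
      mailbox_manager->PushTextureUpdates(sync_token);
    }
  }
  sync_point_client_->ReleaseFenceSync(release);
}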
880 bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread(
881 gpu::CommandBufferNamespace namespace_id,
882 uint64_t command_buffer_id,
883 uint64_t release) {
884 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager();
885 DCHECK(sync_point_manager);
886
887 scoped_refptr<gpu::SyncPointClientState> release_state =
888 sync_point_manager->GetSyncPointClientState(namespace_id,
889 command_buffer_id);
890
891 if (!release_state)
892 return true;
893
894 if (!release_state->IsFenceSyncReleased(release)) {
895 // Use waitable event which is signalled when the release fence is released.
896 sync_point_client_->Wait(
897 release_state,
898 release,
899 base::Bind(&base::WaitableEvent::Signal,
900 base::Unretained(&fence_sync_wait_event_)));
901 fence_sync_wait_event_.Wait();
902 }
903
904 gles2::MailboxManager* mailbox_manager =
905 decoder_->GetContextGroup()->mailbox_manager();
906 gles2::SyncToken sync_token = {
907 namespace_id,
908 command_buffer_id,
909 release
910 };
911 mailbox_manager->PullTextureUpdates(sync_token);
912 return true;
913 }
914
823 void InProcessCommandBuffer::SignalSyncPointOnGpuThread( 915 void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
824 unsigned sync_point, 916 unsigned sync_point,
825 const base::Closure& callback) { 917 const base::Closure& callback) {
826 service_->sync_point_manager()->AddSyncPointCallback(sync_point, callback); 918 service_->sync_point_manager()->AddSyncPointCallback(sync_point, callback);
827 } 919 }
828 920
829 void InProcessCommandBuffer::SignalQuery(unsigned query_id, 921 void InProcessCommandBuffer::SignalQuery(unsigned query_id,
830 const base::Closure& callback) { 922 const base::Closure& callback) {
831 CheckSequencedThread(); 923 CheckSequencedThread();
832 QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread, 924 QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
(...skipping 40 matching lines...)
873 } 965 }
874 966
875 CommandBufferNamespace InProcessCommandBuffer::GetNamespaceID() const { 967 CommandBufferNamespace InProcessCommandBuffer::GetNamespaceID() const {
876 return CommandBufferNamespace::IN_PROCESS; 968 return CommandBufferNamespace::IN_PROCESS;
877 } 969 }
878 970
879 uint64_t InProcessCommandBuffer::GetCommandBufferID() const { 971 uint64_t InProcessCommandBuffer::GetCommandBufferID() const {
880 return command_buffer_id_; 972 return command_buffer_id_;
881 } 973 }
882 974
975 uint64_t InProcessCommandBuffer::GenerateFenceSyncRelease() {
976 return next_fence_sync_release_++;
977 }
978
979 bool InProcessCommandBuffer::IsFenceSyncRelease(uint64_t release) {
980 return release != 0 && release < next_fence_sync_release_;
981 }
982
983 bool InProcessCommandBuffer::IsFenceSyncFlushed(uint64_t release) {
984 return release <= flushed_fence_sync_release_;
985 }
986
883 uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread( 987 uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
884 uint32 client_texture_id) { 988 uint32 client_texture_id) {
885 #if defined(OS_ANDROID) 989 #if defined(OS_ANDROID)
886 return stream_texture_manager_->CreateStreamTexture( 990 return stream_texture_manager_->CreateStreamTexture(
887 client_texture_id, decoder_->GetContextGroup()->texture_manager()); 991 client_texture_id, decoder_->GetContextGroup()->texture_manager());
888 #else 992 #else
889 return 0; 993 return 0;
890 #endif 994 #endif
891 } 995 }
892 996
(...skipping 94 matching lines...)
987 framebuffer_completeness_cache_ = 1091 framebuffer_completeness_cache_ =
988 new gpu::gles2::FramebufferCompletenessCache; 1092 new gpu::gles2::FramebufferCompletenessCache;
989 return framebuffer_completeness_cache_; 1093 return framebuffer_completeness_cache_;
990 } 1094 }
991 1095
992 SyncPointManager* GpuInProcessThread::sync_point_manager() { 1096 SyncPointManager* GpuInProcessThread::sync_point_manager() {
993 return sync_point_manager_; 1097 return sync_point_manager_;
994 } 1098 }
995 1099
996 } // namespace gpu 1100 } // namespace gpu