Chromium Code Reviews

Side by Side Diff: gpu/command_buffer/service/in_process_command_buffer.cc

Issue 19522006: GLInProcessContext: support async flushes and dedicated GPU thread (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 7 years, 4 months ago
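
This patch adds two scheduling modes for the in-process GPU service: a dedicated GPU thread (the default, via ThreadClient/GpuInProcessThread below) and an explicit-scheduling mode in which the embedder pumps queued GPU work itself (QueueClient/GpuCommandQueue). As a rough sketch of the embedder-facing side, assuming a hypothetical WakeUpGpuScheduler() hook that is not part of this patch:

// Hypothetical embedder code; only the InProcessCommandBuffer static
// methods used below are introduced by this patch.
#include "base/bind.h"
#include "gpu/command_buffer/service/in_process_command_buffer.h"

// Schedules DoGpuWork() to run later on whichever thread the embedder has
// chosen for GL work (e.g. by posting a task to that thread's message loop).
void WakeUpGpuScheduler();

void InitExplicitGpuScheduling() {
  // Must run before the first InProcessCommandBuffer is created; afterwards
  // every queued task lands in the shared GpuCommandQueue and this callback
  // fires instead of a post to the dedicated GPU thread.
  gpu::InProcessCommandBuffer::SetScheduleCallback(
      base::Bind(&WakeUpGpuScheduler));
}

void DoGpuWork() {
  // Drains all currently queued GPU tasks on the calling thread.
  gpu::InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread();
}

If SetScheduleCallback() is never called, CreateSchedulerClient() below falls back to ThreadClient, which posts work to a single shared GpuInProcessThread named "GpuThread", i.e. the dedicated GPU thread from the issue title.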
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
6
7 #include <queue>
8 #include <utility>
9
10 #include <GLES2/gl2.h>
11 #ifndef GL_GLEXT_PROTOTYPES
12 #define GL_GLEXT_PROTOTYPES 1
13 #endif
14 #include <GLES2/gl2ext.h>
15 #include <GLES2/gl2extchromium.h>
16
17 #include "base/bind.h"
18 #include "base/bind_helpers.h"
19 #include "base/lazy_instance.h"
20 #include "base/logging.h"
21 #include "base/memory/weak_ptr.h"
22 #include "base/message_loop/message_loop_proxy.h"
23 #include "base/threading/thread.h"
24 #include "gpu/command_buffer/common/id_allocator.h"
25 #include "gpu/command_buffer/service/command_buffer_service.h"
26 #include "gpu/command_buffer/service/context_group.h"
27 #include "gpu/command_buffer/service/gl_context_virtual.h"
28 #include "gpu/command_buffer/service/gpu_scheduler.h"
29 #include "gpu/command_buffer/service/image_manager.h"
30 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
31 #include "ui/gfx/size.h"
32 #include "ui/gl/gl_context.h"
33 #include "ui/gl/gl_image.h"
34 #include "ui/gl/gl_share_group.h"
35 #include "ui/gl/gl_surface.h"
36
37 namespace gpu {
38
39 namespace {
40
41 static base::LazyInstance<std::set<InProcessCommandBuffer*> >
42 g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER;
43
44 static bool g_use_virtualized_gl_context = false;
45 static bool g_uses_explicit_scheduling = false;
46
47 template <typename T>
48 static void RunTaskWithResult(base::Callback<T(void)> task,
49 T* result,
50 base::WaitableEvent* completion) {
51 *result = task.Run();
52 completion->Signal();
53 }
54
55 class GpuInProcessThread
56 : public base::Thread,
57 public base::RefCountedThreadSafe<GpuInProcessThread> {
58 public:
59 GpuInProcessThread();
60
61 private:
62 friend class base::RefCountedThreadSafe<GpuInProcessThread>;
63 virtual ~GpuInProcessThread();
64
65 DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
66 };
67
68 GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
69 Start();
70 }
71
72 GpuInProcessThread::~GpuInProcessThread() {
73 Stop();
74 }
75
76 // Used with explicit scheduling when there is no dedicated GPU thread.
77 class GpuCommandQueue {
78 public:
79 GpuCommandQueue();
80 ~GpuCommandQueue();
81
82 void QueueTask(const base::Closure& task);
83 void RunTasks();
84 void SetScheduleCallback(const base::Closure& callback);
85
86 private:
87 base::Lock tasks_lock_;
88 std::queue<base::Closure> tasks_;
89 base::Closure schedule_callback_;
90
91 DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue);
92 };
93
94 GpuCommandQueue::GpuCommandQueue() {}
95
96 GpuCommandQueue::~GpuCommandQueue() {
97 base::AutoLock lock(tasks_lock_);
98 DCHECK(tasks_.empty());
99 }
100
101 void GpuCommandQueue::QueueTask(const base::Closure& task) {
102 {
103 base::AutoLock lock(tasks_lock_);
104 tasks_.push(task);
105 }
106
107 DCHECK(!schedule_callback_.is_null());
108 schedule_callback_.Run();
109 }
110
111 void GpuCommandQueue::RunTasks() {
112 size_t num_tasks;
113 {
114 base::AutoLock lock(tasks_lock_);
115 num_tasks = tasks_.size();
116 }
117
118 while (num_tasks) {
119 base::Closure task;
120 {
121 base::AutoLock lock(tasks_lock_);
122 task = tasks_.front();
123 tasks_.pop();
124 num_tasks = tasks_.size();
125 }
126
127 task.Run();
128 }
129 }
130
131 void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) {
132 DCHECK(schedule_callback_.is_null());
133 schedule_callback_ = callback;
134 }
135
136 static base::LazyInstance<GpuCommandQueue> g_gpu_queue =
137 LAZY_INSTANCE_INITIALIZER;
138
139 class SchedulerClientBase : public InProcessCommandBuffer::SchedulerClient {
140 public:
141 explicit SchedulerClientBase(bool need_thread);
142 virtual ~SchedulerClientBase();
143
144 static bool HasClients();
145
146 protected:
147 scoped_refptr<GpuInProcessThread> thread_;
148
149 private:
150 static base::LazyInstance<std::set<SchedulerClientBase*> > all_clients_;
151 static base::LazyInstance<base::Lock> all_clients_lock_;
152 };
153
154 base::LazyInstance<std::set<SchedulerClientBase*> >
155 SchedulerClientBase::all_clients_ = LAZY_INSTANCE_INITIALIZER;
156 base::LazyInstance<base::Lock> SchedulerClientBase::all_clients_lock_ =
157 LAZY_INSTANCE_INITIALIZER;
158
159 SchedulerClientBase::SchedulerClientBase(bool need_thread) {
160 base::AutoLock lock(all_clients_lock_.Get());
161 if (need_thread) {
162 if (!all_clients_.Get().empty()) {
163 SchedulerClientBase* other = *all_clients_.Get().begin();
164 thread_ = other->thread_;
165 DCHECK(thread_.get());
166 } else {
167 thread_ = new GpuInProcessThread;
168 }
169 }
170 all_clients_.Get().insert(this);
171 }
172
173 SchedulerClientBase::~SchedulerClientBase() {
174 base::AutoLock lock(all_clients_lock_.Get());
175 all_clients_.Get().erase(this);
176 }
177
178 bool SchedulerClientBase::HasClients() {
179 base::AutoLock lock(all_clients_lock_.Get());
180 return !all_clients_.Get().empty();
181 }
182
183 // A client that talks to the GPU thread
184 class ThreadClient : public SchedulerClientBase {
185 public:
186 ThreadClient();
187 virtual void QueueTask(const base::Closure& task) OVERRIDE;
188 };
189
190 ThreadClient::ThreadClient() : SchedulerClientBase(true) {
191 DCHECK(thread_.get());
192 }
193
194 void ThreadClient::QueueTask(const base::Closure& task) {
195 thread_->message_loop()->PostTask(FROM_HERE, task);
196 }
197
198 // A client that talks to the GpuCommandQueue
199 class QueueClient : public SchedulerClientBase {
200 public:
201 QueueClient();
202 virtual void QueueTask(const base::Closure& task) OVERRIDE;
203 };
204
205 QueueClient::QueueClient() : SchedulerClientBase(false) {
206 DCHECK(!thread_.get());
207 }
208
209 void QueueClient::QueueTask(const base::Closure& task) {
210 g_gpu_queue.Get().QueueTask(task);
211 }
212
213 static scoped_ptr<InProcessCommandBuffer::SchedulerClient>
214 CreateSchedulerClient() {
215 scoped_ptr<InProcessCommandBuffer::SchedulerClient> client;
216 if (g_uses_explicit_scheduling)
217 client.reset(new QueueClient);
218 else
219 client.reset(new ThreadClient);
220
221 return client.Pass();
222 }
223
224 class ScopedEvent {
225 public:
226 explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {}
227 ~ScopedEvent() { event_->Signal(); }
228
229 private:
230 base::WaitableEvent* event_;
231 };
232
233 } // anonymous namespace
234
235 InProcessCommandBuffer::InProcessCommandBuffer()
236 : context_lost_(false),
237 share_group_id_(0),
238 last_put_offset_(-1),
239 flush_event_(false, false),
240 queue_(CreateSchedulerClient()) {}
241
242 InProcessCommandBuffer::~InProcessCommandBuffer() {
243 Destroy();
244 }
245
246 bool InProcessCommandBuffer::IsContextLost() {
247 if (context_lost_ || !command_buffer_) {
248 return true;
249 }
250 CommandBuffer::State state = GetState();
251 return error::IsError(state.error);
252 }
253
254 void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
255 DCHECK(!surface_->IsOffscreen());
256 surface_->Resize(size);
257 }
258
259 bool InProcessCommandBuffer::MakeCurrent() {
260 command_buffer_lock_.AssertAcquired();
261
262 if (!context_lost_ && decoder_->MakeCurrent())
263 return true;
264 DLOG(ERROR) << "Context lost because MakeCurrent failed.";
265 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
266 command_buffer_->SetParseError(gpu::error::kLostContext);
267 return false;
268 }
269
270 void InProcessCommandBuffer::PumpCommands() {
271 ScopedEvent handle_flush(&flush_event_);
272 command_buffer_lock_.AssertAcquired();
273
274 if (!MakeCurrent())
275 return;
276
277 gpu_scheduler_->PutChanged();
278 CommandBuffer::State state = command_buffer_->GetState();
279 DCHECK((!error::IsError(state.error) && !context_lost_) ||
280 (error::IsError(state.error) && context_lost_));
281 }
282
283 bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
284 command_buffer_lock_.AssertAcquired();
285 command_buffer_->SetGetBuffer(transfer_buffer_id);
286 return true;
287 }
288
289 bool InProcessCommandBuffer::Initialize(
290 bool is_offscreen,
291 bool share_resources,
292 gfx::AcceleratedWidget window,
293 const gfx::Size& size,
294 const char* allowed_extensions,
295 const std::vector<int32>& attribs,
296 gfx::GpuPreference gpu_preference,
297 const base::Closure& context_lost_callback,
298 unsigned int share_group_id) {
299
300 share_resources_ = share_resources;
301 context_lost_callback_ = WrapCallback(context_lost_callback);
302 share_group_id_ = share_group_id;
303
304 base::WaitableEvent completion(true, false);
305 bool result;
306 base::Callback<bool(void)> init_task =
307 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
308 base::Unretained(this),
309 is_offscreen,
310 window,
311 size,
312 allowed_extensions,
313 attribs,
314 gpu_preference);
315 QueueTask(
316 base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
317 completion.Wait();
vignatti (out of this project) 2013/08/05 13:54:09 Hi. I'm not sure if content_shell through the use
318 return result;
319 }
320
321 bool InProcessCommandBuffer::InitializeOnGpuThread(
322 bool is_offscreen,
323 gfx::AcceleratedWidget window,
324 const gfx::Size& size,
325 const char* allowed_extensions,
326 const std::vector<int32>& attribs,
327 gfx::GpuPreference gpu_preference) {
328 // Use one share group for all contexts.
329 CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group,
330 (new gfx::GLShareGroup));
331
332 DCHECK(size.width() >= 0 && size.height() >= 0);
333
334 TransferBufferManager* manager = new TransferBufferManager();
335 transfer_buffer_manager_.reset(manager);
336 manager->Initialize();
337
338 scoped_ptr<CommandBufferService> command_buffer(
339 new CommandBufferService(transfer_buffer_manager_.get()));
340 command_buffer->SetPutOffsetChangeCallback(base::Bind(
341 &InProcessCommandBuffer::PumpCommands, base::Unretained(this)));
342 command_buffer->SetParseErrorCallback(base::Bind(
343 &InProcessCommandBuffer::OnContextLost, base::Unretained(this)));
344
345 if (!command_buffer->Initialize()) {
346 LOG(ERROR) << "Could not initialize command buffer.";
347 DestroyOnGpuThread();
348 return false;
349 }
350
351 InProcessCommandBuffer* context_group = NULL;
352
353 if (share_resources_ && !g_all_shared_contexts.Get().empty()) {
354 DCHECK(share_group_id_);
355 for (std::set<InProcessCommandBuffer*>::iterator it =
356 g_all_shared_contexts.Get().begin();
357 it != g_all_shared_contexts.Get().end();
358 ++it) {
359 if ((*it)->share_group_id_ == share_group_id_) {
360 context_group = *it;
361 DCHECK(context_group->share_resources_);
362 context_lost_ = context_group->IsContextLost();
363 break;
364 }
365 }
366 if (!context_group)
367 share_group = new gfx::GLShareGroup;
368 }
369
370 bool bind_generates_resource = false;
371 decoder_.reset(gles2::GLES2Decoder::Create(
372 context_group ? context_group->decoder_->GetContextGroup()
373 : new gles2::ContextGroup(
374 NULL, NULL, NULL, NULL, bind_generates_resource)));
375
376 gpu_scheduler_.reset(
377 new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
378 command_buffer->SetGetBufferChangeCallback(base::Bind(
379 &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
380 command_buffer_ = command_buffer.Pass();
381
382 decoder_->set_engine(gpu_scheduler_.get());
383
384 if (is_offscreen)
385 surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size);
386 else
387 surface_ = gfx::GLSurface::CreateViewGLSurface(window);
388
389 if (!surface_.get()) {
390 LOG(ERROR) << "Could not create GLSurface.";
391 DestroyOnGpuThread();
392 return false;
393 }
394
395 if (g_use_virtualized_gl_context) {
396 context_ = share_group->GetSharedContext();
397 if (!context_.get()) {
398 context_ = gfx::GLContext::CreateGLContext(
399 share_group.get(), surface_.get(), gpu_preference);
400 share_group->SetSharedContext(context_.get());
401 }
402
403 context_ = new GLContextVirtual(
404 share_group.get(), context_.get(), decoder_->AsWeakPtr());
405 if (context_->Initialize(surface_.get(), gpu_preference)) {
406 VLOG(1) << "Created virtual GL context.";
407 } else {
408 context_ = NULL;
409 }
410 } else {
411 context_ = gfx::GLContext::CreateGLContext(
412 share_group.get(), surface_.get(), gpu_preference);
413 }
414
415 if (!context_.get()) {
416 LOG(ERROR) << "Could not create GLContext.";
417 DestroyOnGpuThread();
418 return false;
419 }
420
421 if (!context_->MakeCurrent(surface_.get())) {
422 LOG(ERROR) << "Could not make context current.";
423 DestroyOnGpuThread();
424 return false;
425 }
426
427 gles2::DisallowedFeatures disallowed_features;
428 disallowed_features.swap_buffer_complete_callback = true;
429 disallowed_features.gpu_memory_manager = true;
430 if (!decoder_->Initialize(surface_,
431 context_,
432 is_offscreen,
433 size,
434 disallowed_features,
435 allowed_extensions,
436 attribs)) {
437 LOG(ERROR) << "Could not initialize decoder.";
438 DestroyOnGpuThread();
439 return false;
440 }
441
442 if (!is_offscreen) {
443 decoder_->SetResizeCallback(base::Bind(
444 &InProcessCommandBuffer::OnResizeView, base::Unretained(this)));
445 }
446
447 if (share_resources_) {
448 g_all_shared_contexts.Pointer()->insert(this);
449 }
450
451 return true;
452 }
453
454 void InProcessCommandBuffer::Destroy() {
455 base::WaitableEvent completion(true, false);
456 bool result;
457 base::Callback<bool(void)> destroy_task = base::Bind(
458 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
459 QueueTask(
460 base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
461 completion.Wait();
462 }
463
464 bool InProcessCommandBuffer::DestroyOnGpuThread() {
465 command_buffer_.reset();
466 // Clean up GL resources if possible.
467 bool have_context = context_ && context_->MakeCurrent(surface_);
468 if (decoder_) {
469 decoder_->Destroy(have_context);
470 decoder_.reset();
471 }
472 context_ = NULL;
473 surface_ = NULL;
474
475 g_all_shared_contexts.Pointer()->erase(this);
476 return true;
477 }
478
479 unsigned int InProcessCommandBuffer::CreateImageForGpuMemoryBuffer(
480 gfx::GpuMemoryBufferHandle buffer,
481 gfx::Size size) {
482 unsigned int image_id;
483 {
484 // TODO: ID allocation should go through CommandBuffer
485 base::AutoLock lock(command_buffer_lock_);
486 gles2::ContextGroup* group = decoder_->GetContextGroup();
487 image_id =
488 group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID();
489 }
490 base::Closure image_task =
491 base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
492 base::Unretained(this), buffer, size, image_id);
493 QueueTask(image_task);
494 return image_id;
495 }
496
497 void InProcessCommandBuffer::CreateImageOnGpuThread(
498 gfx::GpuMemoryBufferHandle buffer,
499 gfx::Size size,
500 unsigned int image_id) {
501 scoped_refptr<gfx::GLImage> gl_image =
502 gfx::GLImage::CreateGLImageForGpuMemoryBuffer(buffer, size);
503 decoder_->GetContextGroup()->image_manager()->AddImage(gl_image, image_id);
504 }
505
506 void InProcessCommandBuffer::RemoveImage(unsigned int image_id) {
507 {
508 // TODO: ID allocation should go through CommandBuffer
509 base::AutoLock lock(command_buffer_lock_);
510 gles2::ContextGroup* group = decoder_->GetContextGroup();
511 group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id);
512 }
513 base::Closure image_manager_task =
514 base::Bind(&InProcessCommandBuffer::RemoveImageOnGpuThread,
515 base::Unretained(this),
516 image_id);
517 QueueTask(image_manager_task);
518 }
519
520 void InProcessCommandBuffer::RemoveImageOnGpuThread(unsigned int image_id) {
521 decoder_->GetContextGroup()->image_manager()->RemoveImage(image_id);
522 }
523
524 void InProcessCommandBuffer::OnContextLost() {
525 if (!context_lost_callback_.is_null()) {
526 context_lost_callback_.Run();
527 context_lost_callback_.Reset();
528 }
529
530 context_lost_ = true;
531 if (share_resources_) {
532 for (std::set<InProcessCommandBuffer*>::iterator it =
533 g_all_shared_contexts.Get().begin();
534 it != g_all_shared_contexts.Get().end();
535 ++it) {
536 (*it)->context_lost_ = true;
537 }
538 }
539 }
540
541 CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
542 base::AutoLock lock(command_buffer_lock_);
543 return last_state_ = command_buffer_->GetState();
544 }
545
546 CommandBuffer::State InProcessCommandBuffer::GetState() {
547 return GetStateFast();
548 }
549
550 CommandBuffer::State InProcessCommandBuffer::GetLastState() {
551 return last_state_;
552 }
553
554 int32 InProcessCommandBuffer::GetLastToken() { return last_state_.token; }
555
556 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
557 base::AutoLock lock(command_buffer_lock_);
558 command_buffer_->Flush(put_offset);
559 }
560
561 void InProcessCommandBuffer::Flush(int32 put_offset) {
562 if (last_state_.error != gpu::error::kNoError)
563 return;
564
565 if (last_put_offset_ == put_offset)
566 return;
567
568 last_put_offset_ = put_offset;
569 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
570 base::Unretained(this),
571 put_offset);
572 QueueTask(task);
573 }
574
575 CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset,
576 int32 last_known_get) {
577 if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError)
578 return last_state_;
579
580 Flush(put_offset);
581 GetStateFast();
582 while (last_known_get == last_state_.get_offset &&
583 last_state_.error == gpu::error::kNoError) {
584 flush_event_.Wait();
585 GetStateFast();
586 }
587
588 return last_state_;
589 }
590
591 void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
592 if (last_state_.error != gpu::error::kNoError)
593 return;
594
595 {
596 base::AutoLock lock(command_buffer_lock_);
597 command_buffer_->SetGetBuffer(shm_id);
598 last_put_offset_ = 0;
599 }
600 GetStateFast();
601 }
602
603 gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size,
604 int32* id) {
605 base::AutoLock lock(command_buffer_lock_);
606 return command_buffer_->CreateTransferBuffer(size, id);
607 }
608
609 void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
610 base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
611 base::Unretained(command_buffer_.get()),
612 id);
613
614 QueueTask(task);
615 }
616
617 gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) {
618 NOTREACHED();
619 return gpu::Buffer();
620 }
621
622 uint32 InProcessCommandBuffer::InsertSyncPoint() {
623 NOTREACHED();
624 return 0;
625 }
626 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
627 const base::Closure& callback) {
628 QueueTask(WrapCallback(callback));
629 }
630
631 gpu::error::Error InProcessCommandBuffer::GetLastError() {
632 return last_state_.error;
633 }
634
635 bool InProcessCommandBuffer::Initialize() {
636 NOTREACHED();
637 return false;
638 }
639
640 void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }
641
642 void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); }
643
644 void InProcessCommandBuffer::SetParseError(gpu::error::Error error) {
645 NOTREACHED();
646 }
647
648 void InProcessCommandBuffer::SetContextLostReason(
649 gpu::error::ContextLostReason reason) {
650 NOTREACHED();
651 }
652
653 namespace {
654
655 void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
656 const base::Closure& callback) {
657 if (!loop->BelongsToCurrentThread()) {
658 loop->PostTask(FROM_HERE, callback);
659 } else {
660 callback.Run();
661 }
662 }
663
664 void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
665 DCHECK(callback.get());
666 callback->Run();
667 }
668
669 } // anonymous namespace
670
671 base::Closure InProcessCommandBuffer::WrapCallback(
672 const base::Closure& callback) {
673 // Make sure the callback gets deleted on the target thread by passing
674 // ownership.
675 scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
676 base::Closure callback_on_client_thread =
677 base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
678 base::Closure wrapped_callback =
679 base::Bind(&PostCallback, base::MessageLoopProxy::current(),
680 callback_on_client_thread);
681 return wrapped_callback;
682 }
683
684 // static
685 void InProcessCommandBuffer::EnableVirtualizedContext() {
686 g_use_virtualized_gl_context = true;
687 }
688
689 // static
690 void InProcessCommandBuffer::SetScheduleCallback(
691 const base::Closure& callback) {
692 DCHECK(!g_uses_explicit_scheduling);
693 DCHECK(!SchedulerClientBase::HasClients());
694 g_uses_explicit_scheduling = true;
695 g_gpu_queue.Get().SetScheduleCallback(callback);
696 }
697
698 // static
699 void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
700 g_gpu_queue.Get().RunTasks();
701 }
702
703 } // namespace gpu
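
For reference, a rough sketch of the caller-visible difference between the asynchronous Flush() and the blocking FlushSync() implemented above; command_buffer is assumed to be an already-initialized InProcessCommandBuffer and both helper functions are hypothetical, not part of this patch:

// Hypothetical caller code.
#include "gpu/command_buffer/service/in_process_command_buffer.h"

void SubmitWorkAsync(gpu::InProcessCommandBuffer* command_buffer,
                     int32 put_offset) {
  // Queues FlushOnGpuThread() on the scheduler and returns immediately; the
  // commands are consumed on the GPU thread, or on the embedder's next
  // ProcessGpuWorkOnCurrentThread() call in explicit-scheduling mode.
  command_buffer->Flush(put_offset);
}

gpu::CommandBuffer::State SubmitWorkAndWait(
    gpu::InProcessCommandBuffer* command_buffer,
    int32 put_offset,
    int32 last_known_get) {
  // Flushes, then waits on the internal flush event until the service's get
  // offset has advanced from last_known_get or an error is reported.
  return command_buffer->FlushSync(put_offset, last_known_get);
}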
