Chromium Code Reviews

Side by Side Diff: content/common/gpu/gpu_channel.cc

Issue 12340118: GPU: Only allow the UI channel to preempt if all stubs are scheduled. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fix nit. Created 7 years, 9 months ago
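For readers skimming the diff below: the patch keeps this channel's IO-thread message filter from raising its preempting flag while any of the channel's command buffer stubs is descheduled. Instead of preempting, the filter parks in a new WOULD_PREEMPT_DESCHEDULED state and resumes preempting once every stub is scheduled again. The following is a minimal sketch of just that decision, with simplified stand-in names (PreemptionFlag here is a plain atomic wrapper and RequestPreemption() stands in for the CHECKING-state timeout); it is not the actual Chromium code.

// Minimal sketch of the scheduling-aware preemption decision this patch adds.
// All names are simplified stand-ins, not the real Chromium types.
#include <atomic>

enum class PreemptionState { IDLE, PREEMPTING, WOULD_PREEMPT_DESCHEDULED };

struct PreemptionFlag {
  void Set() { value.store(true); }
  void Reset() { value.store(false); }
  std::atomic<bool> value{false};
};

class PreemptionController {
 public:
  explicit PreemptionController(PreemptionFlag* flag) : flag_(flag) {}

  // Called whenever a stub on this channel is scheduled or descheduled
  // (the real filter receives this via a PostTask from the main thread).
  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  // Stand-in for the CHECKING-state timeout deciding that preemption is due.
  void RequestPreemption() {
    preemption_requested_ = true;
    UpdatePreemptionState();
  }

  // Stand-in for the timed transition back to IDLE.
  void PreemptionFinished() {
    preemption_requested_ = false;
    state_ = PreemptionState::IDLE;
    flag_->Reset();
  }

  PreemptionState state() const { return state_; }

 private:
  void UpdatePreemptionState() {
    if (!preemption_requested_)
      return;
    if (a_stub_is_descheduled_) {
      // A descheduled stub is waiting on work from another channel, so
      // preempting that channel now would only stall things further: park.
      state_ = PreemptionState::WOULD_PREEMPT_DESCHEDULED;
      flag_->Reset();
    } else {
      state_ = PreemptionState::PREEMPTING;
      flag_->Set();
    }
  }

  PreemptionFlag* flag_;
  PreemptionState state_ = PreemptionState::IDLE;
  bool preemption_requested_ = false;
  bool a_stub_is_descheduled_ = false;
};

In the real patch the same idea is split across TransitionToPreempting(), TransitionToWouldPreemptDescheduled() and UpdateStubSchedulingState(), with max_preemption_time_ carrying the unused part of the kMaxPreemptTimeMs budget across the detour through WOULD_PREEMPT_DESCHEDULED.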
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #if defined(OS_WIN) 5 #if defined(OS_WIN)
6 #include <windows.h> 6 #include <windows.h>
7 #endif 7 #endif
8 8
9 #include "content/common/gpu/gpu_channel.h" 9 #include "content/common/gpu/gpu_channel.h"
10 10
(...skipping 139 matching lines...)
150 public: 150 public:
151 // Takes ownership of gpu_channel (see below). 151 // Takes ownership of gpu_channel (see below).
152 SyncPointMessageFilter(base::WeakPtr<GpuChannel>* gpu_channel, 152 SyncPointMessageFilter(base::WeakPtr<GpuChannel>* gpu_channel,
153 scoped_refptr<SyncPointManager> sync_point_manager, 153 scoped_refptr<SyncPointManager> sync_point_manager,
154 scoped_refptr<base::MessageLoopProxy> message_loop) 154 scoped_refptr<base::MessageLoopProxy> message_loop)
155 : preemption_state_(IDLE), 155 : preemption_state_(IDLE),
156 gpu_channel_(gpu_channel), 156 gpu_channel_(gpu_channel),
157 channel_(NULL), 157 channel_(NULL),
158 sync_point_manager_(sync_point_manager), 158 sync_point_manager_(sync_point_manager),
159 message_loop_(message_loop), 159 message_loop_(message_loop),
160 messages_received_(0) { 160 messages_received_(0),
161 a_stub_is_descheduled_(false) {
161 } 162 }
162 163
163 virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE { 164 virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
164 DCHECK(!channel_); 165 DCHECK(!channel_);
165 channel_ = channel; 166 channel_ = channel;
166 } 167 }
167 168
168 virtual void OnFilterRemoved() OVERRIDE { 169 virtual void OnFilterRemoved() OVERRIDE {
169 DCHECK(channel_); 170 DCHECK(channel_);
170 channel_ = NULL; 171 channel_ = NULL;
(...skipping 29 matching lines...)
200 } 201 }
201 } 202 }
202 203
203 void MessageProcessed(uint64 messages_processed) { 204 void MessageProcessed(uint64 messages_processed) {
204 while (!pending_messages_.empty() && 205 while (!pending_messages_.empty() &&
205 pending_messages_.front().message_number <= messages_processed) 206 pending_messages_.front().message_number <= messages_processed)
206 pending_messages_.pop(); 207 pending_messages_.pop();
207 UpdatePreemptionState(); 208 UpdatePreemptionState();
208 } 209 }
209 210
210 void SetPreemptingFlag(gpu::PreemptionFlag* preempting_flag) { 211 void SetPreemptingFlagAndSchedulingState(
212 gpu::PreemptionFlag* preempting_flag,
213 bool a_stub_is_descheduled) {
211 preempting_flag_ = preempting_flag; 214 preempting_flag_ = preempting_flag;
215 a_stub_is_descheduled_ = a_stub_is_descheduled;
216 }
217
218 void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
219 a_stub_is_descheduled_ = a_stub_is_descheduled;
220 UpdatePreemptionState();
212 } 221 }
213 222
214 protected: 223 protected:
215 virtual ~SyncPointMessageFilter() { 224 virtual ~SyncPointMessageFilter() {
216 message_loop_->PostTask(FROM_HERE, base::Bind( 225 message_loop_->PostTask(FROM_HERE, base::Bind(
217 &SyncPointMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_)); 226 &SyncPointMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
218 } 227 }
219 228
220 private: 229 private:
221 enum PreemptionState { 230 enum PreemptionState {
222 // Either there's no other channel to preempt, there are no messages 231 // Either there's no other channel to preempt, there are no messages
223 // pending processing, or we just finished preempting and have to wait 232 // pending processing, or we just finished preempting and have to wait
224 // before preempting again. 233 // before preempting again.
225 IDLE, 234 IDLE,
226 // We are waiting kPreemptWaitTimeMs before checking if we should preempt. 235 // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
227 WAITING, 236 WAITING,
228 // We can preempt whenever any IPC processing takes more than 237 // We can preempt whenever any IPC processing takes more than
229 // kPreemptWaitTimeMs. 238 // kPreemptWaitTimeMs.
230 CHECKING, 239 CHECKING,
231 // We are currently preempting. 240 // We are currently preempting (i.e. no stub is descheduled).
232 PREEMPTING, 241 PREEMPTING,
242 // We would like to preempt, but some stub is descheduled.
243 WOULD_PREEMPT_DESCHEDULED,
233 }; 244 };
234 245
235 PreemptionState preemption_state_; 246 PreemptionState preemption_state_;
236 247
248 // Maximum amount of time that we can spend in PREEMPTING.
249 // It is reset when we transition to IDLE.
250 base::TimeDelta max_preemption_time_;
251
237 struct PendingMessage { 252 struct PendingMessage {
238 uint64 message_number; 253 uint64 message_number;
239 base::TimeTicks time_received; 254 base::TimeTicks time_received;
240 255
241 explicit PendingMessage(uint64 message_number) 256 explicit PendingMessage(uint64 message_number)
242 : message_number(message_number), 257 : message_number(message_number),
243 time_received(base::TimeTicks::Now()) { 258 time_received(base::TimeTicks::Now()) {
244 } 259 }
245 }; 260 };
246 261
(...skipping 12 matching lines...)
259 base::TimeDelta time_elapsed = 274 base::TimeDelta time_elapsed =
260 base::TimeTicks::Now() - pending_messages_.front().time_received; 275 base::TimeTicks::Now() - pending_messages_.front().time_received;
261 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) { 276 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
262 // Schedule another check for when the IPC may go long. 277 // Schedule another check for when the IPC may go long.
263 timer_.Start( 278 timer_.Start(
264 FROM_HERE, 279 FROM_HERE,
265 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) - 280 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
266 time_elapsed, 281 time_elapsed,
267 this, &SyncPointMessageFilter::UpdatePreemptionState); 282 this, &SyncPointMessageFilter::UpdatePreemptionState);
268 } else { 283 } else {
269 TransitionToPreempting(); 284 if (a_stub_is_descheduled_)
285 TransitionToWouldPreemptDescheduled();
286 else
287 TransitionToPreempting();
270 } 288 }
271 } 289 }
272 break; 290 break;
273 case PREEMPTING: 291 case PREEMPTING:
274 if (pending_messages_.empty()) { 292 // A TransitionToIdle() timer should always be running in this state.
275 TransitionToIdle(); 293 DCHECK(timer_.IsRunning());
276 } else { 294 if (a_stub_is_descheduled_)
277 base::TimeDelta time_elapsed = 295 TransitionToWouldPreemptDescheduled();
278 base::TimeTicks::Now() - pending_messages_.front().time_received; 296 else
279 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs) 297 TransitionToIdleIfCaughtUp();
280 TransitionToIdle(); 298 break;
281 } 299 case WOULD_PREEMPT_DESCHEDULED:
300 // A TransitionToIdle() timer should never be running in this state.
301 DCHECK(!timer_.IsRunning());
302 if (!a_stub_is_descheduled_)
303 TransitionToPreempting();
304 else
305 TransitionToIdleIfCaughtUp();
282 break; 306 break;
283 default: 307 default:
284 NOTREACHED(); 308 NOTREACHED();
285 } 309 }
286 } 310 }
287 311
312 void TransitionToIdleIfCaughtUp() {
313 DCHECK(preemption_state_ == PREEMPTING ||
314 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
315 if (pending_messages_.empty()) {
316 TransitionToIdle();
317 } else {
318 base::TimeDelta time_elapsed =
319 base::TimeTicks::Now() - pending_messages_.front().time_received;
320 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
321 TransitionToIdle();
322 }
323 }
324
288 void TransitionToIdle() { 325 void TransitionToIdle() {
289 DCHECK_EQ(preemption_state_, PREEMPTING); 326 DCHECK(preemption_state_ == PREEMPTING ||
327 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
290 // Stop any outstanding timer set to force us from PREEMPTING to IDLE. 328 // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
291 timer_.Stop(); 329 timer_.Stop();
292 330
293 preemption_state_ = IDLE; 331 preemption_state_ = IDLE;
294 preempting_flag_->Reset(); 332 preempting_flag_->Reset();
295 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); 333 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
296 334
297 UpdatePreemptionState(); 335 UpdatePreemptionState();
298 } 336 }
299 337
300 void TransitionToWaiting() { 338 void TransitionToWaiting() {
301 DCHECK_EQ(preemption_state_, IDLE); 339 DCHECK_EQ(preemption_state_, IDLE);
302 DCHECK(!timer_.IsRunning()); 340 DCHECK(!timer_.IsRunning());
303 341
304 preemption_state_ = WAITING; 342 preemption_state_ = WAITING;
305 timer_.Start( 343 timer_.Start(
306 FROM_HERE, 344 FROM_HERE,
307 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs), 345 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
308 this, &SyncPointMessageFilter::TransitionToChecking); 346 this, &SyncPointMessageFilter::TransitionToChecking);
309 } 347 }
310 348
311 void TransitionToChecking() { 349 void TransitionToChecking() {
312 DCHECK_EQ(preemption_state_, WAITING); 350 DCHECK_EQ(preemption_state_, WAITING);
313 DCHECK(!timer_.IsRunning()); 351 DCHECK(!timer_.IsRunning());
314 352
315 preemption_state_ = CHECKING; 353 preemption_state_ = CHECKING;
354 max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
316 UpdatePreemptionState(); 355 UpdatePreemptionState();
317 } 356 }
318 357
319 void TransitionToPreempting() { 358 void TransitionToPreempting() {
320 DCHECK_EQ(preemption_state_, CHECKING); 359 DCHECK(preemption_state_ == CHECKING ||
360 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
361 DCHECK(!a_stub_is_descheduled_);
321 362
322 // Stop any pending state update checks that we may have queued 363 // Stop any pending state update checks that we may have queued
323 // while CHECKING. 364 // while CHECKING.
324 timer_.Stop(); 365 if (preemption_state_ == CHECKING)
366 timer_.Stop();
325 367
326 preemption_state_ = PREEMPTING; 368 preemption_state_ = PREEMPTING;
327 preempting_flag_->Set(); 369 preempting_flag_->Set();
328 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1); 370 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);
329 371
330 timer_.Start( 372 timer_.Start(
331 FROM_HERE, 373 FROM_HERE,
332 base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs), 374 max_preemption_time_,
333 this, &SyncPointMessageFilter::TransitionToIdle); 375 this, &SyncPointMessageFilter::TransitionToIdle);
334 376
335 UpdatePreemptionState(); 377 UpdatePreemptionState();
336 } 378 }
337 379
380 void TransitionToWouldPreemptDescheduled() {
381 DCHECK(preemption_state_ == CHECKING ||
382 preemption_state_ == PREEMPTING);
383 DCHECK(a_stub_is_descheduled_);
384
385 if (preemption_state_ == CHECKING) {
386 // Stop any pending state update checks that we may have queued
387 // while CHECKING.
388 timer_.Stop();
389 } else {
390 // Stop any TransitionToIdle() timers that we may have queued
391 // while PREEMPTING.
392 timer_.Stop();
393 max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
394 if (max_preemption_time_ < base::TimeDelta()) {
395 TransitionToIdle();
396 return;
397 }
398 }
399
400 preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
401 preempting_flag_->Reset();
402 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
403
404 UpdatePreemptionState();
405 }
406
338 static void InsertSyncPointOnMainThread( 407 static void InsertSyncPointOnMainThread(
339 base::WeakPtr<GpuChannel>* gpu_channel, 408 base::WeakPtr<GpuChannel>* gpu_channel,
340 scoped_refptr<SyncPointManager> manager, 409 scoped_refptr<SyncPointManager> manager,
341 int32 routing_id, 410 int32 routing_id,
342 uint32 sync_point) { 411 uint32 sync_point) {
343 // This function must ensure that the sync point will be retired. Normally 412 // This function must ensure that the sync point will be retired. Normally
344 // we'll find the stub based on the routing ID, and associate the sync point 413 // we'll find the stub based on the routing ID, and associate the sync point
345 // with it, but if that fails for any reason (channel or stub already 414 // with it, but if that fails for any reason (channel or stub already
346 // deleted, invalid routing id), we need to retire the sync point 415 // deleted, invalid routing id), we need to retire the sync point
347 // immediately. 416 // immediately.
(...skipping 25 matching lines...)
373 scoped_refptr<SyncPointManager> sync_point_manager_; 442 scoped_refptr<SyncPointManager> sync_point_manager_;
374 scoped_refptr<base::MessageLoopProxy> message_loop_; 443 scoped_refptr<base::MessageLoopProxy> message_loop_;
375 scoped_refptr<gpu::PreemptionFlag> preempting_flag_; 444 scoped_refptr<gpu::PreemptionFlag> preempting_flag_;
376 445
377 std::queue<PendingMessage> pending_messages_; 446 std::queue<PendingMessage> pending_messages_;
378 447
379 // Count of the number of IPCs received on this GpuChannel. 448 // Count of the number of IPCs received on this GpuChannel.
380 uint64 messages_received_; 449 uint64 messages_received_;
381 450
382 base::OneShotTimer<SyncPointMessageFilter> timer_; 451 base::OneShotTimer<SyncPointMessageFilter> timer_;
452
453 bool a_stub_is_descheduled_;
383 }; 454 };
384 455
385 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, 456 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
386 GpuWatchdog* watchdog, 457 GpuWatchdog* watchdog,
387 gfx::GLShareGroup* share_group, 458 gfx::GLShareGroup* share_group,
388 gpu::gles2::MailboxManager* mailbox, 459 gpu::gles2::MailboxManager* mailbox,
389 int client_id, 460 int client_id,
390 bool software) 461 bool software)
391 : gpu_channel_manager_(gpu_channel_manager), 462 : gpu_channel_manager_(gpu_channel_manager),
392 messages_processed_(0), 463 messages_processed_(0),
393 client_id_(client_id), 464 client_id_(client_id),
394 share_group_(share_group ? share_group : new gfx::GLShareGroup), 465 share_group_(share_group ? share_group : new gfx::GLShareGroup),
395 mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager), 466 mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
396 image_manager_(new gpu::gles2::ImageManager), 467 image_manager_(new gpu::gles2::ImageManager),
397 watchdog_(watchdog), 468 watchdog_(watchdog),
398 software_(software), 469 software_(software),
399 handle_messages_scheduled_(false), 470 handle_messages_scheduled_(false),
400 processed_get_state_fast_(false), 471 processed_get_state_fast_(false),
401 currently_processing_message_(NULL), 472 currently_processing_message_(NULL),
402 weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { 473 weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
474 num_stubs_descheduled_(0) {
403 DCHECK(gpu_channel_manager); 475 DCHECK(gpu_channel_manager);
404 DCHECK(client_id); 476 DCHECK(client_id);
405 477
406 channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu"); 478 channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
407 const CommandLine* command_line = CommandLine::ForCurrentProcess(); 479 const CommandLine* command_line = CommandLine::ForCurrentProcess();
408 log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages); 480 log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
409 disallowed_features_.multisampling = 481 disallowed_features_.multisampling =
410 command_line->HasSwitch(switches::kDisableGLMultisampling); 482 command_line->HasSwitch(switches::kDisableGLMultisampling);
411 #if defined(OS_ANDROID) 483 #if defined(OS_ANDROID)
412 stream_texture_manager_.reset(new StreamTextureManagerAndroid(this)); 484 stream_texture_manager_.reset(new StreamTextureManagerAndroid(this));
(...skipping 125 matching lines...)
538 // not emptied here, which ensures that OnMessageReceived will continue to 610 // not emptied here, which ensures that OnMessageReceived will continue to
539 // defer newly received messages until the ones in the queue have all been 611 // defer newly received messages until the ones in the queue have all been
540 // handled by HandleMessage. HandleMessage is invoked as a 612 // handled by HandleMessage. HandleMessage is invoked as a
541 // task to prevent reentrancy. 613 // task to prevent reentrancy.
542 MessageLoop::current()->PostTask( 614 MessageLoop::current()->PostTask(
543 FROM_HERE, 615 FROM_HERE,
544 base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr())); 616 base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
545 handle_messages_scheduled_ = true; 617 handle_messages_scheduled_ = true;
546 } 618 }
547 619
620 void GpuChannel::StubSchedulingChanged(bool scheduled) {
621 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
622 if (scheduled) {
623 num_stubs_descheduled_--;
624 OnScheduled();
625 } else {
626 num_stubs_descheduled_++;
627 }
628 DCHECK_LE(num_stubs_descheduled_, stubs_.size());
629 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;
630
631 if (a_stub_is_descheduled != a_stub_was_descheduled) {
632 if (preempting_flag_.get()) {
633 io_message_loop_->PostTask(
634 FROM_HERE,
635 base::Bind(&SyncPointMessageFilter::UpdateStubSchedulingState,
636 filter_, a_stub_is_descheduled));
637 }
638 }
639 }
640
548 void GpuChannel::CreateViewCommandBuffer( 641 void GpuChannel::CreateViewCommandBuffer(
549 const gfx::GLSurfaceHandle& window, 642 const gfx::GLSurfaceHandle& window,
550 int32 surface_id, 643 int32 surface_id,
551 const GPUCreateCommandBufferConfig& init_params, 644 const GPUCreateCommandBufferConfig& init_params,
552 int32* route_id) { 645 int32* route_id) {
553 TRACE_EVENT1("gpu", 646 TRACE_EVENT1("gpu",
554 "GpuChannel::CreateViewCommandBuffer", 647 "GpuChannel::CreateViewCommandBuffer",
555 "surface_id", 648 "surface_id",
556 surface_id); 649 surface_id);
557 650
(...skipping 84 matching lines...)
642 735
643 void GpuChannel::RemoveRoute(int32 route_id) { 736 void GpuChannel::RemoveRoute(int32 route_id) {
644 router_.RemoveRoute(route_id); 737 router_.RemoveRoute(route_id);
645 } 738 }
646 739
647 gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() { 740 gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
648 if (!preempting_flag_.get()) { 741 if (!preempting_flag_.get()) {
649 preempting_flag_ = new gpu::PreemptionFlag; 742 preempting_flag_ = new gpu::PreemptionFlag;
650 io_message_loop_->PostTask( 743 io_message_loop_->PostTask(
651 FROM_HERE, 744 FROM_HERE,
652 base::Bind(&SyncPointMessageFilter::SetPreemptingFlag, 745 base::Bind(&SyncPointMessageFilter::SetPreemptingFlagAndSchedulingState,
653 filter_, preempting_flag_)); 746 filter_, preempting_flag_, num_stubs_descheduled_ > 0));
654 } 747 }
655 return preempting_flag_.get(); 748 return preempting_flag_.get();
656 } 749 }
657 750
658 void GpuChannel::SetPreemptByFlag( 751 void GpuChannel::SetPreemptByFlag(
659 scoped_refptr<gpu::PreemptionFlag> preempted_flag) { 752 scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
660 preempted_flag_ = preempted_flag; 753 preempted_flag_ = preempted_flag;
661 754
662 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_); 755 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
663 !it.IsAtEnd(); it.Advance()) { 756 !it.IsAtEnd(); it.Advance()) {
(...skipping 128 matching lines...)
792 TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer", 885 TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
793 "route_id", route_id); 886 "route_id", route_id);
794 887
795 if (router_.ResolveRoute(route_id)) { 888 if (router_.ResolveRoute(route_id)) {
796 GpuCommandBufferStub* stub = stubs_.Lookup(route_id); 889 GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
797 bool need_reschedule = (stub && !stub->IsScheduled()); 890 bool need_reschedule = (stub && !stub->IsScheduled());
798 router_.RemoveRoute(route_id); 891 router_.RemoveRoute(route_id);
799 stubs_.Remove(route_id); 892 stubs_.Remove(route_id);
800 // In case the renderer is currently blocked waiting for a sync reply from 893 // In case the renderer is currently blocked waiting for a sync reply from
801 // the stub, we need to make sure to reschedule the GpuChannel here. 894 // the stub, we need to make sure to reschedule the GpuChannel here.
802 if (need_reschedule) 895 if (need_reschedule) {
803 OnScheduled(); 896 // This stub won't get a chance to reschedule, so update the count
897 // now.
898 StubSchedulingChanged(true);
899 }
804 } 900 }
805 } 901 }
806 902
807 #if defined(OS_ANDROID) 903 #if defined(OS_ANDROID)
808 void GpuChannel::OnRegisterStreamTextureProxy( 904 void GpuChannel::OnRegisterStreamTextureProxy(
809 int32 stream_id, const gfx::Size& initial_size, int32* route_id) { 905 int32 stream_id, const gfx::Size& initial_size, int32* route_id) {
810 // Note that route_id is only used for notifications sent out from here. 906 // Note that route_id is only used for notifications sent out from here.
811 // StreamTextureManager owns all texture objects and for incoming messages 907 // StreamTextureManager owns all texture objects and for incoming messages
812 // it finds the correct object based on stream_id. 908 // it finds the correct object based on stream_id.
813 *route_id = GenerateRouteID(); 909 *route_id = GenerateRouteID();
(...skipping 36 matching lines...)
850 messages_processed_++; 946 messages_processed_++;
851 if (preempting_flag_.get()) { 947 if (preempting_flag_.get()) {
852 io_message_loop_->PostTask( 948 io_message_loop_->PostTask(
853 FROM_HERE, 949 FROM_HERE,
854 base::Bind(&SyncPointMessageFilter::MessageProcessed, 950 base::Bind(&SyncPointMessageFilter::MessageProcessed,
855 filter_, messages_processed_)); 951 filter_, messages_processed_));
856 } 952 }
857 } 953 }
858 954
859 } // namespace content 955 } // namespace content
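One further reading aid, for the threading: GpuChannel counts descheduled stubs on the main thread and, when the aggregate "any stub is descheduled" boolean flips, posts the new value to the IO-thread filter using the same PostTask pattern already used for MessageProcessed(). A rough, self-contained sketch of that hand-off, with a plain task queue standing in for the IO message loop (not the Chromium base/IPC API):

// Rough sketch of the main-thread -> IO-thread notification added by
// GpuChannel::StubSchedulingChanged(). Filter stands in for
// SyncPointMessageFilter; the queue stands in for the IO message loop.
#include <cstddef>
#include <functional>
#include <queue>

class Filter {
 public:
  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    // The real filter re-evaluates its preemption state here.
  }

 private:
  bool a_stub_is_descheduled_ = false;
};

class Channel {
 public:
  Channel(Filter* filter, std::queue<std::function<void()>>* io_tasks)
      : filter_(filter), io_tasks_(io_tasks) {}

  // Called on the main thread whenever one of this channel's stubs is
  // scheduled (true) or descheduled (false).
  void StubSchedulingChanged(bool scheduled) {
    bool was_descheduled = num_stubs_descheduled_ > 0;
    if (scheduled)
      --num_stubs_descheduled_;  // The real code also calls OnScheduled() here.
    else
      ++num_stubs_descheduled_;
    bool is_descheduled = num_stubs_descheduled_ > 0;

    // Only cross threads when the aggregate boolean actually changes.
    // (The real code also skips the post until a preempting flag exists.)
    if (is_descheduled != was_descheduled) {
      Filter* filter = filter_;
      io_tasks_->push([filter, is_descheduled] {
        filter->UpdateStubSchedulingState(is_descheduled);
      });
    }
  }

 private:
  Filter* filter_;
  std::queue<std::function<void()>>* io_tasks_;
  std::size_t num_stubs_descheduled_ = 0;
};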