OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/bind.h" | 5 #include "base/bind.h" |
6 #include "base/bind_helpers.h" | 6 #include "base/bind_helpers.h" |
7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
8 #include "base/debug/trace_event.h" | 8 #include "base/debug/trace_event.h" |
9 #include "base/hash.h" | 9 #include "base/hash.h" |
10 #include "base/shared_memory.h" | 10 #include "base/shared_memory.h" |
(...skipping 143 matching lines...) |
154 | 154 |
155 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) { | 155 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) { |
156 FastSetActiveURL(active_url_, active_url_hash_); | 156 FastSetActiveURL(active_url_, active_url_hash_); |
157 | 157 |
158 // Ensure the appropriate GL context is current before handling any IPC | 158 // Ensure the appropriate GL context is current before handling any IPC |
159 // messages directed at the command buffer. This ensures that the message | 159 // messages directed at the command buffer. This ensures that the message |
160 // handler can assume that the context is current (not necessary for | 160 // handler can assume that the context is current (not necessary for |
161 // Echo, RetireSyncPoint, or WaitSyncPoint). | 161 // Echo, RetireSyncPoint, or WaitSyncPoint). |
162 if (decoder_.get() && | 162 if (decoder_.get() && |
163 message.type() != GpuCommandBufferMsg_Echo::ID && | 163 message.type() != GpuCommandBufferMsg_Echo::ID && |
164 message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID && | 164 message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID) { |
165 message.type() != GpuCommandBufferMsg_WaitSyncPoint::ID) { | |
166 if (!MakeCurrent()) | 165 if (!MakeCurrent()) |
167 return false; | 166 return false; |
168 } | 167 } |
169 | 168 |
170 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers | 169 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers |
171 // here. This is so the reply can be delayed if the scheduler is unscheduled. | 170 // here. This is so the reply can be delayed if the scheduler is unscheduled. |
172 bool handled = true; | 171 bool handled = true; |
173 IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message) | 172 IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message) |
174 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize, | 173 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize, |
175 OnInitialize); | 174 OnInitialize); |
(...skipping 18 matching lines...) |
194 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyVideoDecoder, | 193 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyVideoDecoder, |
195 OnDestroyVideoDecoder) | 194 OnDestroyVideoDecoder) |
196 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible, | 195 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible, |
197 OnSetSurfaceVisible) | 196 OnSetSurfaceVisible) |
198 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DiscardBackbuffer, | 197 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DiscardBackbuffer, |
199 OnDiscardBackbuffer) | 198 OnDiscardBackbuffer) |
200 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EnsureBackbuffer, | 199 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EnsureBackbuffer, |
201 OnEnsureBackbuffer) | 200 OnEnsureBackbuffer) |
202 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint, | 201 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint, |
203 OnRetireSyncPoint) | 202 OnRetireSyncPoint) |
204 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_WaitSyncPoint, | |
205 OnWaitSyncPoint) | |
206 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint, | 203 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint, |
207 OnSignalSyncPoint) | 204 OnSignalSyncPoint) |
208 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats, | 205 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats, |
209 OnReceivedClientManagedMemoryStats) | 206 OnReceivedClientManagedMemoryStats) |
210 IPC_MESSAGE_HANDLER( | 207 IPC_MESSAGE_HANDLER( |
211 GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback, | 208 GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback, |
212 OnSetClientHasMemoryAllocationChangedCallback) | 209 OnSetClientHasMemoryAllocationChangedCallback) |
213 IPC_MESSAGE_UNHANDLED(handled = false) | 210 IPC_MESSAGE_UNHANDLED(handled = false) |
214 IPC_END_MESSAGE_MAP() | 211 IPC_END_MESSAGE_MAP() |
215 | 212 |
216 // Ensure that any delayed work that was created will be handled. | 213 // Ensure that any delayed work that was created will be handled. |
217 ScheduleDelayedWork(kHandleMoreWorkPeriodMs); | 214 ScheduleDelayedWork(kHandleMoreWorkPeriodMs); |
218 | 215 |
219 DCHECK(handled); | 216 DCHECK(handled); |
220 return handled; | 217 return handled; |
221 } | 218 } |
222 | 219 |
223 bool GpuCommandBufferStub::Send(IPC::Message* message) { | 220 bool GpuCommandBufferStub::Send(IPC::Message* message) { |
224 return channel_->Send(message); | 221 return channel_->Send(message); |
225 } | 222 } |
226 | 223 |
227 bool GpuCommandBufferStub::IsScheduled() { | 224 bool GpuCommandBufferStub::IsScheduled() { |
228 return sync_point_wait_count_ == 0 && | 225 return (!scheduler_.get() || scheduler_->IsScheduled()); |
229 (!scheduler_.get() || scheduler_->IsScheduled()); | |
230 } | 226 } |
231 | 227 |
232 bool GpuCommandBufferStub::HasMoreWork() { | 228 bool GpuCommandBufferStub::HasMoreWork() { |
233 return scheduler_.get() && scheduler_->HasMoreWork(); | 229 return scheduler_.get() && scheduler_->HasMoreWork(); |
234 } | 230 } |
235 | 231 |
236 void GpuCommandBufferStub::PollWork() { | 232 void GpuCommandBufferStub::PollWork() { |
237 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork"); | 233 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork"); |
238 delayed_work_scheduled_ = false; | 234 delayed_work_scheduled_ = false; |
239 FastSetActiveURL(active_url_, active_url_hash_); | 235 FastSetActiveURL(active_url_, active_url_hash_); |
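
Note on the hunk above: GpuCommandBufferMsg_WaitSyncPoint is dropped both from the MakeCurrent() exclusion check and from the IPC handler map, and IsScheduled() no longer consults sync_point_wait_count_ itself but defers entirely to the scheduler. The following is a minimal standalone sketch of the resulting dispatch guard, not Chromium code; the MsgType enum, NeedsCurrentContext() helper, and MakeCurrent() body are placeholders for illustration only.

#include <cstdint>
#include <iostream>

enum class MsgType : uint32_t { kEcho, kRetireSyncPoint, kAsyncFlush };

bool MakeCurrent() {
  // Placeholder for binding the GL context; assume it can fail in real code.
  return true;
}

// After this change only Echo and RetireSyncPoint skip the context guard.
bool NeedsCurrentContext(MsgType type) {
  return type != MsgType::kEcho && type != MsgType::kRetireSyncPoint;
}

bool OnMessageReceived(MsgType type) {
  if (NeedsCurrentContext(type) && !MakeCurrent())
    return false;  // drop the message if the context cannot be made current
  std::cout << "handled message " << static_cast<uint32_t>(type) << "\n";
  return true;
}

int main() {
  OnMessageReceived(MsgType::kRetireSyncPoint);  // no MakeCurrent() needed
  OnMessageReceived(MsgType::kAsyncFlush);       // requires a current context
}
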
(...skipping 231 matching lines...) |
471 } | 467 } |
472 | 468 |
473 if (CommandLine::ForCurrentProcess()->HasSwitch( | 469 if (CommandLine::ForCurrentProcess()->HasSwitch( |
474 switches::kEnableGPUServiceLogging)) { | 470 switches::kEnableGPUServiceLogging)) { |
475 decoder_->set_log_commands(true); | 471 decoder_->set_log_commands(true); |
476 } | 472 } |
477 | 473 |
478 decoder_->SetMsgCallback( | 474 decoder_->SetMsgCallback( |
479 base::Bind(&GpuCommandBufferStub::SendConsoleMessage, | 475 base::Bind(&GpuCommandBufferStub::SendConsoleMessage, |
480 base::Unretained(this))); | 476 base::Unretained(this))); |
| 477 decoder_->SetWaitSyncPointCallback( |
| 478 base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint, |
| 479 base::Unretained(this))); |
481 | 480 |
482 command_buffer_->SetPutOffsetChangeCallback( | 481 command_buffer_->SetPutOffsetChangeCallback( |
483 base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this))); | 482 base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this))); |
484 command_buffer_->SetGetBufferChangeCallback( | 483 command_buffer_->SetGetBufferChangeCallback( |
485 base::Bind(&gpu::GpuScheduler::SetGetBuffer, | 484 base::Bind(&gpu::GpuScheduler::SetGetBuffer, |
486 base::Unretained(scheduler_.get()))); | 485 base::Unretained(scheduler_.get()))); |
487 command_buffer_->SetParseErrorCallback( | 486 command_buffer_->SetParseErrorCallback( |
488 base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this))); | 487 base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this))); |
489 scheduler_->SetScheduledCallback( | 488 scheduler_->SetScheduledCallback( |
490 base::Bind(&GpuCommandBufferStub::OnReschedule, base::Unretained(this))); | 489 base::Bind(&GpuCommandBufferStub::OnReschedule, base::Unretained(this))); |
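
Note on the hunk above: instead of handling WaitSyncPoint as its own IPC message, the stub now registers a wait callback on the decoder via SetWaitSyncPointCallback, so the wait is observed at the point in the command stream where it occurs. Below is a standalone sketch of that wiring using std::function in place of base::Callback; the Decoder and Stub classes here are stand-ins, not the real interfaces. In the real code the stub owns the decoder, which is why binding with base::Unretained(this) is safe.

#include <cstdint>
#include <functional>
#include <iostream>

// Stand-in for the decoder: it consults the callback when the command stream
// contains a wait, and pauses parsing if the callback returns false.
class Decoder {
 public:
  using WaitSyncPointCallback = std::function<bool(uint32_t)>;
  void SetWaitSyncPointCallback(WaitSyncPointCallback cb) {
    wait_cb_ = std::move(cb);
  }
  bool Wait(uint32_t sync_point) {
    return wait_cb_ ? wait_cb_(sync_point) : true;
  }
 private:
  WaitSyncPointCallback wait_cb_;
};

// Stand-in for the stub side of the callback.
class Stub {
 public:
  bool OnWaitSyncPoint(uint32_t sync_point) {
    std::cout << "wait requested on sync point " << sync_point << "\n";
    return false;  // pretend the wait cannot complete yet
  }
};

int main() {
  Decoder decoder;
  Stub stub;
  decoder.SetWaitSyncPointCallback(
      [&stub](uint32_t sp) { return stub.OnWaitSyncPoint(sp); });
  decoder.Wait(42);  // decoder would stop parsing until the point retires
}
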
(...skipping 260 matching lines...) |
751 sync_points_.push_back(sync_point); | 750 sync_points_.push_back(sync_point); |
752 } | 751 } |
753 | 752 |
754 void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) { | 753 void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) { |
755 DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point); | 754 DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point); |
756 sync_points_.pop_front(); | 755 sync_points_.pop_front(); |
757 GpuChannelManager* manager = channel_->gpu_channel_manager(); | 756 GpuChannelManager* manager = channel_->gpu_channel_manager(); |
758 manager->sync_point_manager()->RetireSyncPoint(sync_point); | 757 manager->sync_point_manager()->RetireSyncPoint(sync_point); |
759 } | 758 } |
760 | 759 |
761 void GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) { | 760 bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) { |
762 if (sync_point_wait_count_ == 0) { | 761 if (sync_point_wait_count_ == 0) { |
763 TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this, | 762 TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this, |
764 "GpuCommandBufferStub", this); | 763 "GpuCommandBufferStub", this); |
765 } | 764 } |
| 765 scheduler_->SetScheduled(false); |
766 ++sync_point_wait_count_; | 766 ++sync_point_wait_count_; |
767 GpuChannelManager* manager = channel_->gpu_channel_manager(); | 767 GpuChannelManager* manager = channel_->gpu_channel_manager(); |
768 manager->sync_point_manager()->AddSyncPointCallback( | 768 manager->sync_point_manager()->AddSyncPointCallback( |
769 sync_point, | 769 sync_point, |
770 base::Bind(&GpuCommandBufferStub::OnSyncPointRetired, | 770 base::Bind(&GpuCommandBufferStub::OnSyncPointRetired, |
771 this->AsWeakPtr())); | 771 this->AsWeakPtr())); |
| 772 return scheduler_->IsScheduled(); |
772 } | 773 } |
773 | 774 |
774 void GpuCommandBufferStub::OnSyncPointRetired() { | 775 void GpuCommandBufferStub::OnSyncPointRetired() { |
775 --sync_point_wait_count_; | 776 --sync_point_wait_count_; |
776 if (sync_point_wait_count_ == 0) { | 777 if (sync_point_wait_count_ == 0) { |
777 TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this, | 778 TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this, |
778 "GpuCommandBufferStub", this); | 779 "GpuCommandBufferStub", this); |
779 } | 780 } |
780 OnReschedule(); | 781 scheduler_->SetScheduled(true); |
781 } | 782 } |
782 | 783 |
783 void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) { | 784 void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) { |
784 GpuChannelManager* manager = channel_->gpu_channel_manager(); | 785 GpuChannelManager* manager = channel_->gpu_channel_manager(); |
785 manager->sync_point_manager()->AddSyncPointCallback( | 786 manager->sync_point_manager()->AddSyncPointCallback( |
786 sync_point, | 787 sync_point, |
787 base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck, | 788 base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck, |
788 this->AsWeakPtr(), | 789 this->AsWeakPtr(), |
789 id)); | 790 id)); |
790 } | 791 } |
(...skipping 87 matching lines...) |
878 if (surface_ && MakeCurrent()) | 879 if (surface_ && MakeCurrent()) |
879 surface_->SetFrontbufferAllocation( | 880 surface_->SetFrontbufferAllocation( |
880 allocation.browser_allocation.suggest_have_frontbuffer); | 881 allocation.browser_allocation.suggest_have_frontbuffer); |
881 } | 882 } |
882 | 883 |
883 last_memory_allocation_valid_ = true; | 884 last_memory_allocation_valid_ = true; |
884 last_memory_allocation_ = allocation; | 885 last_memory_allocation_ = allocation; |
885 } | 886 } |
886 | 887 |
887 } // namespace content | 888 } // namespace content |