Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(279)

Side by Side Diff: content/common/gpu/client/gpu_channel_host.cc

Issue 1331843005: Implemented new fence syncs which replace the old sync points. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Reverted mojo readme, changed wait() to take a pointer Created 5 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « content/common/gpu/client/gpu_channel_host.h ('k') | content/common/gpu/gpu_channel.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/common/gpu/client/gpu_channel_host.h" 5 #include "content/common/gpu/client/gpu_channel_host.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "base/atomic_sequence_num.h" 9 #include "base/atomic_sequence_num.h"
10 #include "base/bind.h" 10 #include "base/bind.h"
(...skipping 17 matching lines...) Expand all
28 28
29 namespace content { 29 namespace content {
30 namespace { 30 namespace {
31 31
32 // Global atomic to generate unique transfer buffer IDs. 32 // Global atomic to generate unique transfer buffer IDs.
33 base::StaticAtomicSequenceNumber g_next_transfer_buffer_id; 33 base::StaticAtomicSequenceNumber g_next_transfer_buffer_id;
34 34
35 } // namespace 35 } // namespace
36 36
37 GpuChannelHost::StreamFlushInfo::StreamFlushInfo() 37 GpuChannelHost::StreamFlushInfo::StreamFlushInfo()
38 : flush_pending(false), 38 : next_stream_flush_id(1),
39 flushed_stream_flush_id(0),
40 verified_stream_flush_id(0),
41 flush_pending(false),
39 route_id(MSG_ROUTING_NONE), 42 route_id(MSG_ROUTING_NONE),
40 put_offset(0), 43 put_offset(0),
41 flush_count(0) {} 44 flush_count(0),
45 flush_id(0) {}
42 46
43 GpuChannelHost::StreamFlushInfo::~StreamFlushInfo() {} 47 GpuChannelHost::StreamFlushInfo::~StreamFlushInfo() {}
44 48
45 // static 49 // static
46 scoped_refptr<GpuChannelHost> GpuChannelHost::Create( 50 scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
47 GpuChannelHostFactory* factory, 51 GpuChannelHostFactory* factory,
48 int channel_id, 52 int channel_id,
49 const gpu::GPUInfo& gpu_info, 53 const gpu::GPUInfo& gpu_info,
50 const IPC::ChannelHandle& channel_handle, 54 const IPC::ChannelHandle& channel_handle,
51 base::WaitableEvent* shutdown_event, 55 base::WaitableEvent* shutdown_event,
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after
120 bool result = channel_->Send(message.release()); 124 bool result = channel_->Send(message.release());
121 if (!result) 125 if (!result)
122 DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed"; 126 DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
123 return result; 127 return result;
124 } 128 }
125 129
126 bool result = sync_filter_->Send(message.release()); 130 bool result = sync_filter_->Send(message.release());
127 return result; 131 return result;
128 } 132 }
129 133
130 void GpuChannelHost::OrderingBarrier( 134 uint32_t GpuChannelHost::OrderingBarrier(
131 int32 route_id, 135 int32 route_id,
132 int32 stream_id, 136 int32 stream_id,
133 int32 put_offset, 137 int32 put_offset,
134 uint32 flush_count, 138 uint32 flush_count,
135 const std::vector<ui::LatencyInfo>& latency_info, 139 const std::vector<ui::LatencyInfo>& latency_info,
136 bool put_offset_changed, 140 bool put_offset_changed,
137 bool do_flush) { 141 bool do_flush) {
138 AutoLock lock(context_lock_); 142 AutoLock lock(context_lock_);
139 StreamFlushInfo& flush_info = stream_flush_info_[stream_id]; 143 StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
140 if (flush_info.flush_pending && flush_info.route_id != route_id) 144 if (flush_info.flush_pending && flush_info.route_id != route_id)
141 InternalFlush(stream_id); 145 InternalFlush(&flush_info);
142 146
143 if (put_offset_changed) { 147 if (put_offset_changed) {
148 const uint32_t flush_id = flush_info.next_stream_flush_id++;
144 flush_info.flush_pending = true; 149 flush_info.flush_pending = true;
145 flush_info.route_id = route_id; 150 flush_info.route_id = route_id;
146 flush_info.put_offset = put_offset; 151 flush_info.put_offset = put_offset;
147 flush_info.flush_count = flush_count; 152 flush_info.flush_count = flush_count;
153 flush_info.flush_id = flush_id;
148 flush_info.latency_info.insert(flush_info.latency_info.end(), 154 flush_info.latency_info.insert(flush_info.latency_info.end(),
149 latency_info.begin(), latency_info.end()); 155 latency_info.begin(), latency_info.end());
150 156
151 if (do_flush) 157 if (do_flush)
152 InternalFlush(stream_id); 158 InternalFlush(&flush_info);
159
160 return flush_id;
153 } 161 }
162 return 0;
154 } 163 }
155 164
156 void GpuChannelHost::InternalFlush(int32 stream_id) { 165 void GpuChannelHost::InternalFlush(StreamFlushInfo* flush_info) {
157 context_lock_.AssertAcquired(); 166 context_lock_.AssertAcquired();
158 StreamFlushInfo& flush_info = stream_flush_info_[stream_id]; 167 DCHECK(flush_info);
159 DCHECK(flush_info.flush_pending); 168 DCHECK(flush_info->flush_pending);
169 DCHECK_LT(flush_info->flushed_stream_flush_id, flush_info->flush_id);
160 Send(new GpuCommandBufferMsg_AsyncFlush( 170 Send(new GpuCommandBufferMsg_AsyncFlush(
161 flush_info.route_id, flush_info.put_offset, flush_info.flush_count, 171 flush_info->route_id, flush_info->put_offset, flush_info->flush_count,
162 flush_info.latency_info)); 172 flush_info->latency_info));
163 flush_info.latency_info.clear(); 173 flush_info->latency_info.clear();
164 flush_info.flush_pending = false; 174 flush_info->flush_pending = false;
175
176 flush_info->flushed_stream_flush_id = flush_info->flush_id;
165 } 177 }
166 178
167 scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateViewCommandBuffer( 179 scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateViewCommandBuffer(
168 int32 surface_id, 180 int32 surface_id,
169 CommandBufferProxyImpl* share_group, 181 CommandBufferProxyImpl* share_group,
170 int32 stream_id, 182 int32 stream_id,
171 GpuStreamPriority stream_priority, 183 GpuStreamPriority stream_priority,
172 const std::vector<int32>& attribs, 184 const std::vector<int32>& attribs,
173 const GURL& active_url, 185 const GURL& active_url,
174 gfx::GpuPreference gpu_preference) { 186 gfx::GpuPreference gpu_preference) {
(...skipping 108 matching lines...) Expand 10 before | Expand all | Expand 10 after
283 void GpuChannelHost::DestroyCommandBuffer( 295 void GpuChannelHost::DestroyCommandBuffer(
284 CommandBufferProxyImpl* command_buffer) { 296 CommandBufferProxyImpl* command_buffer) {
285 TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer"); 297 TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");
286 298
287 int32 route_id = command_buffer->route_id(); 299 int32 route_id = command_buffer->route_id();
288 int32 stream_id = command_buffer->stream_id(); 300 int32 stream_id = command_buffer->stream_id();
289 Send(new GpuChannelMsg_DestroyCommandBuffer(route_id)); 301 Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
290 RemoveRoute(route_id); 302 RemoveRoute(route_id);
291 303
292 AutoLock lock(context_lock_); 304 AutoLock lock(context_lock_);
293 if (stream_flush_info_[stream_id].route_id == route_id) 305 StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
294 stream_flush_info_.erase(stream_id); 306 if (flush_info.flush_pending && flush_info.route_id == route_id)
307 flush_info.flush_pending = false;
295 } 308 }
296 309
297 void GpuChannelHost::DestroyChannel() { 310 void GpuChannelHost::DestroyChannel() {
298 DCHECK(factory_->IsMainThread()); 311 DCHECK(factory_->IsMainThread());
299 AutoLock lock(context_lock_); 312 AutoLock lock(context_lock_);
300 channel_.reset(); 313 channel_.reset();
301 } 314 }
302 315
303 void GpuChannelHost::AddRoute( 316 void GpuChannelHost::AddRoute(
304 int route_id, base::WeakPtr<IPC::Listener> listener) { 317 int route_id, base::WeakPtr<IPC::Listener> listener) {
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after
377 } 390 }
378 391
379 int32 GpuChannelHost::GenerateRouteID() { 392 int32 GpuChannelHost::GenerateRouteID() {
380 return next_route_id_.GetNext(); 393 return next_route_id_.GetNext();
381 } 394 }
382 395
383 int32 GpuChannelHost::GenerateStreamID() { 396 int32 GpuChannelHost::GenerateStreamID() {
384 return next_stream_id_.GetNext(); 397 return next_stream_id_.GetNext();
385 } 398 }
386 399
400 uint32_t GpuChannelHost::ValidateFlushIDReachedServer(int32 stream_id) {
401 // Store what flush ids we will be validating for all streams.
402 base::hash_map<int32, uint32_t> validate_flushes;
403 uint32_t flushed_stream_flush_id = 0;
404 uint32_t verified_stream_flush_id = 0;
405 {
406 AutoLock lock(context_lock_);
407 for (const auto& iter : stream_flush_info_) {
408 const int32 iter_stream_id = iter.first;
409 const StreamFlushInfo& flush_info = iter.second;
410 if (iter_stream_id == stream_id) {
411 flushed_stream_flush_id = flush_info.flushed_stream_flush_id;
412 verified_stream_flush_id = flush_info.verified_stream_flush_id;
413 }
414
415 if (flush_info.flushed_stream_flush_id >
416 flush_info.verified_stream_flush_id) {
417 validate_flushes.insert(
418 std::make_pair(iter_stream_id, flush_info.flushed_stream_flush_id));
419 }
420 }
421 }
422
423 if (flushed_stream_flush_id == verified_stream_flush_id) {
424 // Current stream has no unverified flushes.
425 return verified_stream_flush_id;
426 }
427
428 if (Send(new GpuChannelMsg_Nop())) {
429 // Update verified flush id for all streams.
430 uint32_t highest_flush_id = 0;
431 AutoLock lock(context_lock_);
432 for (const auto& iter : validate_flushes) {
433 const int32 validated_stream_id = iter.first;
434 const uint32_t validated_flush_id = iter.second;
435 StreamFlushInfo& flush_info = stream_flush_info_[validated_stream_id];
436 if (flush_info.verified_stream_flush_id < validated_flush_id) {
437 flush_info.verified_stream_flush_id = validated_flush_id;
438 }
439
440 if (validated_stream_id == stream_id)
441 highest_flush_id = flush_info.verified_stream_flush_id;
442 }
443
444 return highest_flush_id;
445 }
446
447 return 0;
448 }
449
450 uint32_t GpuChannelHost::GetHighestValidatedFlushID(int32 stream_id) {
451 AutoLock lock(context_lock_);
452 StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
453 return flush_info.verified_stream_flush_id;
454 }
455
387 GpuChannelHost::~GpuChannelHost() { 456 GpuChannelHost::~GpuChannelHost() {
388 #if DCHECK_IS_ON() 457 #if DCHECK_IS_ON()
389 AutoLock lock(context_lock_); 458 AutoLock lock(context_lock_);
390 DCHECK(!channel_) 459 DCHECK(!channel_)
391 << "GpuChannelHost::DestroyChannel must be called before destruction."; 460 << "GpuChannelHost::DestroyChannel must be called before destruction.";
392 #endif 461 #endif
393 } 462 }
394 463
395 GpuChannelHost::MessageFilter::ListenerInfo::ListenerInfo() {} 464 GpuChannelHost::MessageFilter::ListenerInfo::ListenerInfo() {}
396 465
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
455 524
456 listeners_.clear(); 525 listeners_.clear();
457 } 526 }
458 527
459 bool GpuChannelHost::MessageFilter::IsLost() const { 528 bool GpuChannelHost::MessageFilter::IsLost() const {
460 AutoLock lock(lock_); 529 AutoLock lock(lock_);
461 return lost_; 530 return lost_;
462 } 531 }
463 532
464 } // namespace content 533 } // namespace content
OLDNEW
« no previous file with comments | « content/common/gpu/client/gpu_channel_host.h ('k') | content/common/gpu/gpu_channel.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698