Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(764)

Unified Diff: gpu/command_buffer/service/sync_point_manager.h

Issue 1331843005: Implemented new fence syncs which replace the old sync points. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Reverted mojo readme, changed wait() to take a pointer Created 5 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: gpu/command_buffer/service/sync_point_manager.h
diff --git a/gpu/command_buffer/service/sync_point_manager.h b/gpu/command_buffer/service/sync_point_manager.h
index 3f11e05dabbe407ea9c27e018c759701e3fedbb3..a72566b0dc844dca20d360b2ef907b5530868088 100644
--- a/gpu/command_buffer/service/sync_point_manager.h
+++ b/gpu/command_buffer/service/sync_point_manager.h
@@ -5,6 +5,8 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
#define GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
+#include <functional>
+#include <queue>
#include <vector>
#include "base/atomic_sequence_num.h"
@@ -19,36 +21,34 @@
#include "gpu/command_buffer/common/constants.h"
#include "gpu/gpu_export.h"
+namespace base {
+class SingleThreadTaskRunner;
+} // namespace base
+
namespace gpu {
class SyncPointClient;
+class SyncPointClientState;
class SyncPointManager;
-class GPU_EXPORT SyncPointClientState
- : public base::RefCountedThreadSafe<SyncPointClientState> {
+class GPU_EXPORT SyncPointOrderData
+ : public base::RefCountedThreadSafe<SyncPointOrderData> {
public:
- static scoped_refptr<SyncPointClientState> Create();
- uint32_t GenerateUnprocessedOrderNumber(SyncPointManager* sync_point_manager);
-
- void BeginProcessingOrderNumber(uint32_t order_num) {
- DCHECK(processing_thread_checker_.CalledOnValidThread());
- DCHECK_GE(order_num, current_order_num_);
- current_order_num_ = order_num;
- }
+ static scoped_refptr<SyncPointOrderData> Create();
+ void Destroy();
- void FinishProcessingOrderNumber(uint32_t order_num) {
- DCHECK(processing_thread_checker_.CalledOnValidThread());
- DCHECK_EQ(current_order_num_, order_num);
- DCHECK_GT(order_num, processed_order_num());
- base::subtle::Release_Store(&processed_order_num_, order_num);
- }
+ uint32_t GenerateUnprocessedOrderNumber(SyncPointManager* sync_point_manager);
+ void BeginProcessingOrderNumber(uint32_t order_num);
+ void FinishProcessingOrderNumber(uint32_t order_num);
uint32_t processed_order_num() const {
- return base::subtle::Acquire_Load(&processed_order_num_);
+ base::AutoLock auto_lock(lock_);
+ return processed_order_num_;
}
uint32_t unprocessed_order_num() const {
- return base::subtle::Acquire_Load(&unprocessed_order_num_);
+ base::AutoLock auto_lock(lock_);
+ return unprocessed_order_num_;
}
uint32_t current_order_num() const {
@@ -56,18 +56,37 @@ class GPU_EXPORT SyncPointClientState
return current_order_num_;
}
- protected:
- friend class base::RefCountedThreadSafe<SyncPointClientState>;
- friend class SyncPointClient;
-
- SyncPointClientState();
- virtual ~SyncPointClientState();
-
- // Last finished IPC order number.
- base::subtle::Atomic32 processed_order_num_;
+ private:
+ friend class base::RefCountedThreadSafe<SyncPointOrderData>;
+ friend class SyncPointClientState;
- // Unprocessed order number expected to be processed under normal execution.
- base::subtle::Atomic32 unprocessed_order_num_;
+ struct OrderFence {
+ uint32_t order_num;
+ uint64_t fence_release;
+ scoped_refptr<SyncPointClientState> client_state;
+
+ OrderFence(uint32_t order,
+ uint64_t release,
+ scoped_refptr<SyncPointClientState> state);
+ ~OrderFence();
+
+ bool operator>(const OrderFence& rhs) const {
+ return (order_num > rhs.order_num) ||
+ ((order_num == rhs.order_num) &&
+ (fence_release > rhs.fence_release));
+ }
+ };
+ typedef std::priority_queue<OrderFence,
+ std::vector<OrderFence>,
+ std::greater<OrderFence>> OrderFenceQueue;
+
+ SyncPointOrderData();
+ ~SyncPointOrderData();
+
+ bool ValidateReleaseOrderNumber(
+ scoped_refptr<SyncPointClientState> client_state,
+ uint32_t wait_order_num,
+ uint64_t fence_release);
// Non thread-safe functions need to be called from a single thread.
base::ThreadChecker processing_thread_checker_;
@@ -75,6 +94,95 @@ class GPU_EXPORT SyncPointClientState
// Current IPC order number being processed (only used on processing thread).
uint32_t current_order_num_;
+ // This lock protects destroyed_, processed_order_num_,
+ // unprocessed_order_num_, and order_fence_queue_. All order numbers (n) in
+ // order_fence_queue_ must follow the invariant:
+ // processed_order_num_ < n <= unprocessed_order_num_.
+ mutable base::Lock lock_;
+
+ bool destroyed_;
+
+ // Last finished IPC order number.
+ uint32_t processed_order_num_;
+
+ // Unprocessed order number expected to be processed under normal execution.
+ uint32_t unprocessed_order_num_;
+
+ // In situations where we are waiting on fence syncs that do not exist, we
+ // validate by making sure the order number does not pass the order number
+ // at which the wait command was issued. If the order number reaches the
+ // wait command's, we should automatically release up to the expected
+ // release count. Note that this also releases other lower release counts,
+ // so a single misbehaved fence sync is enough to invalidate/signal all
+ // previous fence syncs.
+ OrderFenceQueue order_fence_queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncPointOrderData);
+};
+
+class GPU_EXPORT SyncPointClientState
+ : public base::RefCountedThreadSafe<SyncPointClientState> {
+ public:
+ scoped_refptr<SyncPointOrderData> order_data() { return order_data_; }
+
+ bool IsFenceSyncReleased(uint64_t release) {
+ return release <= fence_sync_release();
+ }
+
+ uint64_t fence_sync_release() {
+ base::AutoLock auto_lock(fence_sync_lock_);
+ return fence_sync_release_;
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<SyncPointClientState>;
+ friend class SyncPointClient;
+ friend class SyncPointOrderData;
+
+ struct ReleaseCallback {
+ uint64_t release_count;
+ base::Closure callback_closure;
+
+ ReleaseCallback(uint64_t release, const base::Closure& callback);
+ ~ReleaseCallback();
+
+ bool operator>(const ReleaseCallback& rhs) const {
+ return release_count > rhs.release_count;
+ }
+ };
+ typedef std::priority_queue<ReleaseCallback,
+ std::vector<ReleaseCallback>,
+ std::greater<ReleaseCallback>>
+ ReleaseCallbackQueue;
+
+ SyncPointClientState(scoped_refptr<SyncPointOrderData> order_data);
+ ~SyncPointClientState();
+
+ // Queues the callback to be called if the release is valid. If the release
+ // is invalid, this function will return false and the callback will never
+ // be called.
+ bool WaitForRelease(uint32_t wait_order_num,
+ uint64_t release,
+ const base::Closure& callback);
+
+ void ReleaseFenceSync(uint64_t release);
+ void EnsureReleased(uint64_t release);
+ void ReleaseFenceSyncLocked(uint64_t release,
+ std::vector<base::Closure>* callback_list);
+
+ // Global order data where releases will originate from.
+ scoped_refptr<SyncPointOrderData> order_data_;
+
+ // Protects fence_sync_release_ and release_callback_queue_.
+ base::Lock fence_sync_lock_;
+
+ // Current fence sync release that has been signaled.
+ uint64_t fence_sync_release_;
+
+ // In well defined fence sync operations, fence syncs are released in order
+ // so simply having a priority queue for callbacks is enough.
+ ReleaseCallbackQueue release_callback_queue_;
+
DISALLOW_COPY_AND_ASSIGN(SyncPointClientState);
};
@@ -84,12 +192,32 @@ class GPU_EXPORT SyncPointClient {
scoped_refptr<SyncPointClientState> client_state() { return client_state_; }
+ // Wait for a release count to be reached on a SyncPointClientState. If this
+ // function returns false, that means the wait was invalid. Otherwise, if it
+ // returns true, it means the release was valid. In the case where the release
+ // is valid but has happened already, it will still return true. In all cases
+ // wait_complete_callback will be called eventually. The callback function
+ // may be called on another thread so it should be thread-safe. For
+ // convenience, another non-thread-safe version is defined below where you
+ // can supply a task runner.
+ bool Wait(SyncPointClientState* release_state,
+ uint64_t release_count,
+ const base::Closure& wait_complete_callback);
+
+ bool WaitNonThreadSafe(SyncPointClientState* release_state,
+ uint64_t release_count,
+ scoped_refptr<base::SingleThreadTaskRunner> runner,
+ const base::Closure& wait_complete_callback);
+
+ void ReleaseFenceSync(uint64_t release);
+
private:
friend class SyncPointManager;
SyncPointClient(SyncPointManager* sync_point_manager,
- scoped_refptr<SyncPointClientState> state,
- CommandBufferNamespace namespace_id, uint64_t client_id);
+ scoped_refptr<SyncPointOrderData> order_data,
+ CommandBufferNamespace namespace_id,
+ uint64_t client_id);
// Sync point manager is guaranteed to exist in the lifetime of the client.
SyncPointManager* sync_point_manager_;
@@ -98,8 +226,8 @@ class GPU_EXPORT SyncPointClient {
scoped_refptr<SyncPointClientState> client_state_;
// Unique namespace/client id pair for this sync point client.
- CommandBufferNamespace namespace_id_;
- uint64_t client_id_;
+ const CommandBufferNamespace namespace_id_;
+ const uint64_t client_id_;
DISALLOW_COPY_AND_ASSIGN(SyncPointClient);
};
@@ -113,8 +241,9 @@ class GPU_EXPORT SyncPointManager {
// Creates/Destroy a sync point client which message processors should hold.
scoped_ptr<SyncPointClient> CreateSyncPointClient(
- scoped_refptr<SyncPointClientState> client_state,
- CommandBufferNamespace namespace_id, uint64_t client_id);
+ scoped_refptr<SyncPointOrderData> order_data,
+ CommandBufferNamespace namespace_id,
+ uint64_t client_id);
// Finds the state of an already created sync point client.
scoped_refptr<SyncPointClientState> GetSyncPointClientState(
@@ -142,7 +271,7 @@ class GPU_EXPORT SyncPointManager {
private:
friend class SyncPointClient;
- friend class SyncPointClientState;
+ friend class SyncPointOrderData;
typedef std::vector<base::Closure> ClosureList;
typedef base::hash_map<uint32, ClosureList> SyncPointMap;
« no previous file with comments | « gpu/command_buffer/service/mailbox_manager_unittest.cc ('k') | gpu/command_buffer/service/sync_point_manager.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698