Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(203)

Side by Side Diff: base/debug/trace_event_impl.cc

Issue 22962004: Thread-local trace-event buffers (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Update browser_shutdown_profile_dumper Created 7 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/debug/trace_event_impl.h ('k') | base/debug/trace_event_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/trace_event_impl.h" 5 #include "base/debug/trace_event_impl.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "base/base_switches.h" 9 #include "base/base_switches.h"
10 #include "base/bind.h" 10 #include "base/bind.h"
11 #include "base/command_line.h" 11 #include "base/command_line.h"
12 #include "base/debug/leak_annotations.h" 12 #include "base/debug/leak_annotations.h"
13 #include "base/debug/trace_event.h" 13 #include "base/debug/trace_event.h"
14 #include "base/format_macros.h" 14 #include "base/format_macros.h"
15 #include "base/lazy_instance.h" 15 #include "base/lazy_instance.h"
16 #include "base/memory/singleton.h" 16 #include "base/memory/singleton.h"
17 #include "base/message_loop/message_loop.h"
17 #include "base/process/process_metrics.h" 18 #include "base/process/process_metrics.h"
18 #include "base/stl_util.h" 19 #include "base/stl_util.h"
19 #include "base/strings/string_split.h" 20 #include "base/strings/string_split.h"
20 #include "base/strings/string_tokenizer.h" 21 #include "base/strings/string_tokenizer.h"
21 #include "base/strings/string_util.h" 22 #include "base/strings/string_util.h"
22 #include "base/strings/stringprintf.h" 23 #include "base/strings/stringprintf.h"
23 #include "base/strings/utf_string_conversions.h" 24 #include "base/strings/utf_string_conversions.h"
24 #include "base/synchronization/cancellation_flag.h" 25 #include "base/synchronization/cancellation_flag.h"
25 #include "base/synchronization/waitable_event.h" 26 #include "base/synchronization/waitable_event.h"
26 #include "base/sys_info.h" 27 #include "base/sys_info.h"
27 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" 28 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
28 #include "base/threading/platform_thread.h" 29 #include "base/threading/platform_thread.h"
29 #include "base/threading/thread_id_name_manager.h" 30 #include "base/threading/thread_id_name_manager.h"
30 #include "base/threading/thread_local.h"
31 #include "base/time/time.h" 31 #include "base/time/time.h"
32 32
33 #if defined(OS_WIN) 33 #if defined(OS_WIN)
34 #include "base/debug/trace_event_win.h" 34 #include "base/debug/trace_event_win.h"
35 #endif 35 #endif
36 36
37 class DeleteTraceLogForTesting { 37 class DeleteTraceLogForTesting {
38 public: 38 public:
39 static void Delete() { 39 static void Delete() {
40 Singleton<base::debug::TraceLog, 40 Singleton<base::debug::TraceLog,
41 LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0); 41 LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
42 } 42 }
43 }; 43 };
44 44
45 // The thread buckets for the sampling profiler. 45 // The thread buckets for the sampling profiler.
46 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3]; 46 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
47 47
48 namespace base { 48 namespace base {
49 namespace debug { 49 namespace debug {
50 50
51 namespace {
52
53 // The overhead of TraceEvent above this threshold will be reported in the
54 // trace.
55 const int kOverheadReportThresholdInMicroseconds = 50;
56
51 // Controls the number of trace events we will buffer in-memory 57 // Controls the number of trace events we will buffer in-memory
52 // before throwing them away. 58 // before throwing them away.
53 const size_t kTraceEventVectorBufferSize = 250000; 59 const size_t kTraceEventVectorBufferSize = 250000;
54 const size_t kTraceEventRingBufferSize = kTraceEventVectorBufferSize / 4; 60 const size_t kTraceEventRingBufferSize = kTraceEventVectorBufferSize / 4;
61 const size_t kTraceEventThreadLocalBufferSize = 64;
55 const size_t kTraceEventBatchSize = 1000; 62 const size_t kTraceEventBatchSize = 1000;
56 const size_t kTraceEventInitialBufferSize = 1024; 63 const size_t kTraceEventInitialBufferSize = 1024;
57 64
65 const int kThreadFlushTimeoutMs = 1000;
66
58 #define MAX_CATEGORY_GROUPS 100 67 #define MAX_CATEGORY_GROUPS 100
59 68
60 namespace {
61
62 // Parallel arrays g_category_groups and g_category_group_enabled are separate 69 // Parallel arrays g_category_groups and g_category_group_enabled are separate
63 // so that a pointer to a member of g_category_group_enabled can be easily 70 // so that a pointer to a member of g_category_group_enabled can be easily
64 // converted to an index into g_category_groups. This allows macros to deal 71 // converted to an index into g_category_groups. This allows macros to deal
65 // only with char enabled pointers from g_category_group_enabled, and we can 72 // only with char enabled pointers from g_category_group_enabled, and we can
66 // convert internally to determine the category name from the char enabled 73 // convert internally to determine the category name from the char enabled
67 // pointer. 74 // pointer.
68 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { 75 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
69 "tracing already shutdown", 76 "tracing already shutdown",
70 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", 77 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
71 "__metadata", 78 "__metadata",
72 }; 79 // For reporting trace_event overhead. For thread local event buffers only.
80 "trace_event_overhead"};
73 81
74 // The enabled flag is char instead of bool so that the API can be used from C. 82 // The enabled flag is char instead of bool so that the API can be used from C.
75 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 }; 83 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
76 const int g_category_already_shutdown = 0; 84 const int g_category_already_shutdown = 0;
77 const int g_category_categories_exhausted = 1; 85 const int g_category_categories_exhausted = 1;
78 const int g_category_metadata = 2; 86 const int g_category_metadata = 2;
79 const int g_num_builtin_categories = 3; 87 const int g_category_trace_event_overhead = 3;
88 const int g_num_builtin_categories = 4;
80 int g_category_index = g_num_builtin_categories; // Skip default categories. 89 int g_category_index = g_num_builtin_categories; // Skip default categories.
81 90
82 // The name of the current thread. This is used to decide if the current 91 // The name of the current thread. This is used to decide if the current
83 // thread name has changed. We combine all the seen thread names into the 92 // thread name has changed. We combine all the seen thread names into the
84 // output name for the thread. 93 // output name for the thread.
85 LazyInstance<ThreadLocalPointer<const char> >::Leaky 94 LazyInstance<ThreadLocalPointer<const char> >::Leaky
86 g_current_thread_name = LAZY_INSTANCE_INITIALIZER; 95 g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
87 96
88 const char kRecordUntilFull[] = "record-until-full"; 97 const char kRecordUntilFull[] = "record-until-full";
89 const char kRecordContinuously[] = "record-continuously"; 98 const char kRecordContinuously[] = "record-continuously";
90 const char kEnableSampling[] = "enable-sampling"; 99 const char kEnableSampling[] = "enable-sampling";
91 100
92 } // namespace 101 TimeTicks ThreadNow() {
102 return TimeTicks::IsThreadNowSupported() ?
103 TimeTicks::ThreadNow() : TimeTicks();
104 }
93 105
94 class TraceBufferRingBuffer : public TraceBuffer { 106 class TraceBufferRingBuffer : public TraceBuffer {
95 public: 107 public:
96 TraceBufferRingBuffer() 108 TraceBufferRingBuffer()
97 : unused_event_index_(0), 109 : unused_event_index_(0),
98 oldest_event_index_(0) { 110 oldest_event_index_(0) {
99 logged_events_.reserve(kTraceEventInitialBufferSize); 111 logged_events_.reserve(kTraceEventInitialBufferSize);
100 } 112 }
101 113
102 virtual ~TraceBufferRingBuffer() {} 114 virtual ~TraceBufferRingBuffer() {}
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after
176 class TraceBufferVector : public TraceBuffer { 188 class TraceBufferVector : public TraceBuffer {
177 public: 189 public:
178 TraceBufferVector() : current_iteration_index_(0) { 190 TraceBufferVector() : current_iteration_index_(0) {
179 logged_events_.reserve(kTraceEventInitialBufferSize); 191 logged_events_.reserve(kTraceEventInitialBufferSize);
180 } 192 }
181 193
182 virtual ~TraceBufferVector() { 194 virtual ~TraceBufferVector() {
183 } 195 }
184 196
185 virtual void AddEvent(const TraceEvent& event) OVERRIDE { 197 virtual void AddEvent(const TraceEvent& event) OVERRIDE {
186 // Note, we have two callers which need to be handled. The first is 198 // Note, we have two callers which need to be handled:
187 // AddTraceEventWithThreadIdAndTimestamp() which checks Size() and does an 199 // - AddEventToMainBufferWhileLocked() which has two cases:
188 // early exit if full. The second is AddThreadNameMetadataEvents(). 200 // - called directly from AddTraceEventWithThreadIdAndTimeStamp()
201 // which checks if buffer is full and does an early exit if full;
202 // - called from ThreadLocalEventBuffer::FlushWhileLocked();
203 // - AddThreadNameMetadataEvents().
189 // We can not DCHECK(!IsFull()) because we have to add the metadata 204 // We can not DCHECK(!IsFull()) because we have to add the metadata
190 // events even if the buffer is full. 205 // events and flush thread-local buffers even if the buffer is full.
191 logged_events_.push_back(event); 206 logged_events_.push_back(event);
192 } 207 }
193 208
194 virtual bool HasMoreEvents() const OVERRIDE { 209 virtual bool HasMoreEvents() const OVERRIDE {
195 return current_iteration_index_ < Size(); 210 return current_iteration_index_ < Size();
196 } 211 }
197 212
198 virtual const TraceEvent& NextEvent() OVERRIDE { 213 virtual const TraceEvent& NextEvent() OVERRIDE {
199 DCHECK(HasMoreEvents()); 214 DCHECK(HasMoreEvents());
200 return GetEventAt(current_iteration_index_++); 215 return GetEventAt(current_iteration_index_++);
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after
262 277
263 // As this buffer is never full, we can return any positive number. 278 // As this buffer is never full, we can return any positive number.
264 virtual size_t Capacity() const OVERRIDE { return 1; } 279 virtual size_t Capacity() const OVERRIDE { return 1; }
265 280
266 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE { 281 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
267 NOTREACHED(); 282 NOTREACHED();
268 return *static_cast<TraceEvent*>(NULL); 283 return *static_cast<TraceEvent*>(NULL);
269 } 284 }
270 }; 285 };
271 286
287 } // namespace
288
272 //////////////////////////////////////////////////////////////////////////////// 289 ////////////////////////////////////////////////////////////////////////////////
273 // 290 //
274 // TraceEvent 291 // TraceEvent
275 // 292 //
276 //////////////////////////////////////////////////////////////////////////////// 293 ////////////////////////////////////////////////////////////////////////////////
277 294
278 namespace { 295 namespace {
279 296
280 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; } 297 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
281 298
(...skipping 475 matching lines...) Expand 10 before | Expand all | Expand 10 after
757 774
758 TraceBucketData::~TraceBucketData() { 775 TraceBucketData::~TraceBucketData() {
759 } 776 }
760 777
761 //////////////////////////////////////////////////////////////////////////////// 778 ////////////////////////////////////////////////////////////////////////////////
762 // 779 //
763 // TraceLog 780 // TraceLog
764 // 781 //
765 //////////////////////////////////////////////////////////////////////////////// 782 ////////////////////////////////////////////////////////////////////////////////
766 783
// Per-thread staging buffer for trace events. Events are accumulated here
// without taking the global TraceLog lock, and flushed in batches into the
// main TraceLog buffer (see FlushWhileLocked). Lifetime is tied to the
// thread's MessageLoop via DestructionObserver.
784 class TraceLog::ThreadLocalEventBuffer
785     : public MessageLoop::DestructionObserver {
786  public:
787   ThreadLocalEventBuffer(TraceLog* trace_log);
788   virtual ~ThreadLocalEventBuffer();
789
  // Appends |event| to this thread's buffer; flushes into the main buffer
  // (under the TraceLog lock) once the local buffer reaches capacity.
790   void AddEvent(const TraceEvent& event, NotificationHelper* notifier);
  // Accumulates per-event tracing overhead; emits begin/end "overhead"
  // events when a single event's overhead exceeds the report threshold.
791   void ReportOverhead(const TimeTicks& event_timestamp,
792                       const TimeTicks& event_thread_timestamp);
793
794  private:
795   // MessageLoop::DestructionObserver
796   virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
797
  // Moves all locally buffered events into the main TraceLog buffer.
  // Caller must hold trace_log_->lock_.
798   void FlushWhileLocked(NotificationHelper* notifier);
799
  // Debug check that this object is the calling thread's current buffer.
800   void CheckThisIsCurrentBuffer() {
801     DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
802   }
803
804   // Since TraceLog is a leaky singleton, trace_log_ will always be valid
805   // as long as the thread exists.
806   TraceLog* trace_log_;
  // Events buffered on this thread, not yet flushed to the main buffer.
807   std::vector<TraceEvent> logged_events_;
  // Number of events whose overhead was measured via ReportOverhead().
808   int event_count_;
  // Total measured trace-event overhead for this thread.
809   TimeDelta overhead_;
810
811   DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
812 };
813
// Registers this buffer with the owning TraceLog and with the current
// thread's MessageLoop. If the trace_event_overhead category is enabled,
// opens an async "thread_trace_event" span that the destructor closes.
814 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
815     : trace_log_(trace_log),
816       event_count_(0) {
817   logged_events_.reserve(kTraceEventThreadLocalBufferSize);
818
  // Begin half of the per-thread overhead span; the matching ASYNC_END is
  // emitted in the destructor (only when event_count_ is non-zero).
819   if (g_category_group_enabled[g_category_trace_event_overhead]) {
820     int thread_id = static_cast<int>(PlatformThread::CurrentId());
821     logged_events_.push_back(TraceEvent(
822         thread_id,
823         TimeTicks::NowFromSystemTraceTime() - trace_log->time_offset_,
824         ThreadNow(),
825         TRACE_EVENT_PHASE_ASYNC_BEGIN,
826         &g_category_group_enabled[g_category_trace_event_overhead],
827         "thread_trace_event",
828         thread_id,
829         0, NULL, NULL, NULL, NULL,
830         TRACE_EVENT_FLAG_HAS_ID));
831   }
832
833   // ThreadLocalEventBuffer is created only if the thread has a message loop, so
834   // the following message_loop won't be NULL.
835   MessageLoop* message_loop = MessageLoop::current();
836   message_loop->AddDestructionObserver(this);
837
  // Track this loop so TraceLog::Flush() can post a flush task to it.
838   AutoLock lock(trace_log->lock_);
839   trace_log->thread_message_loops_.insert(message_loop);
840 }
841
// Emits the closing overhead-summary event (when any overhead was measured),
// flushes all remaining local events into the main buffer, and unregisters
// this buffer from the TraceLog and the MessageLoop.
842 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
843   CheckThisIsCurrentBuffer();
844   MessageLoop::current()->RemoveDestructionObserver(this);
845
846   // Zero event_count_ happens in either of the following cases:
847   // - no event generated for the thread;
848   // - the thread has no message loop;
849   // - trace_event_overhead is disabled.
850   // The trace-viewer will ignore the TRACE_EVENT_PHASE_ASYNC_BEGIN event
851   // because of no matching TRACE_EVENT_PHASE_ASYNC_END event.
852   if (event_count_) {
  // Attach the count and per-event average overhead as event arguments.
853     const char* arg_names[2] = { "event_count", "average_overhead" };
854     unsigned char arg_types[2];
855     unsigned long long arg_values[2];
856     trace_event_internal::SetTraceValue(
857         event_count_, &arg_types[0], &arg_values[0]);
858     trace_event_internal::SetTraceValue(
859         overhead_.InMillisecondsF() / event_count_,
860         &arg_types[1], &arg_values[1]);
861     int thread_id = static_cast<int>(PlatformThread::CurrentId());
862     logged_events_.push_back(TraceEvent(
863         thread_id,
864         TimeTicks::NowFromSystemTraceTime() - trace_log_->time_offset_,
865         ThreadNow(),
866         TRACE_EVENT_PHASE_ASYNC_END,
867         &g_category_group_enabled[g_category_trace_event_overhead],
868         "thread_trace_event",
869         thread_id,
870         2, arg_names, arg_types, arg_values, NULL,
871         TRACE_EVENT_FLAG_HAS_ID));
872   }
873
  // Flush under the lock, then send any buffer-full notification outside it.
874   NotificationHelper notifier(trace_log_);
875   {
876     AutoLock lock(trace_log_->lock_);
877     FlushWhileLocked(&notifier);
878     trace_log_->thread_message_loops_.erase(MessageLoop::current());
879   }
880   trace_log_->thread_local_event_buffer_.Set(NULL);
881   notifier.SendNotificationIfAny();
882 }
883
// Appends |event| to the thread-local buffer without locking; only when the
// local buffer reaches kTraceEventThreadLocalBufferSize is the TraceLog lock
// taken to flush the batch into the main buffer.
884 void TraceLog::ThreadLocalEventBuffer::AddEvent(const TraceEvent& event,
885                                                 NotificationHelper* notifier) {
886   CheckThisIsCurrentBuffer();
887   logged_events_.push_back(event);
888   if (logged_events_.size() >= kTraceEventThreadLocalBufferSize) {
889     AutoLock lock(trace_log_->lock_);
890     FlushWhileLocked(notifier);
891   }
892 }
893
// Measures the wall-clock overhead of recording one trace event (time from
// |event_timestamp| to now) and accumulates it into overhead_. If a single
// event's overhead reaches kOverheadReportThresholdInMicroseconds, a
// begin/end "overhead" event pair is recorded so the cost shows up in the
// trace itself. No-op unless the trace_event_overhead category is enabled.
894 void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
895     const TimeTicks& event_timestamp, const TimeTicks& event_thread_timestamp) {
896   if (!g_category_group_enabled[g_category_trace_event_overhead])
897     return;
898
899   event_count_++;
900   TimeTicks now =
901       TimeTicks::NowFromSystemTraceTime() - trace_log_->time_offset_;
902   TimeDelta overhead = now - event_timestamp;
903   if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
904     int thread_id = static_cast<int>(PlatformThread::CurrentId());
905     // TODO(wangxianzhu): Use X event when it's ready.
906     logged_events_.push_back(TraceEvent(
907         thread_id, event_timestamp, event_thread_timestamp,
908         TRACE_EVENT_PHASE_BEGIN,
909         &g_category_group_enabled[g_category_trace_event_overhead],
910         "overhead",
911         0, 0, NULL, NULL, NULL, NULL, 0));
912     logged_events_.push_back(TraceEvent(
913         thread_id, now, ThreadNow(),
914         TRACE_EVENT_PHASE_END,
915         &g_category_group_enabled[g_category_trace_event_overhead],
916         "overhead",
917         0, 0, NULL, NULL, NULL, NULL, 0));
918   }
919   overhead_ += overhead;
920 }
921
// MessageLoop teardown hook: self-deletes so the destructor flushes this
// thread's remaining events before the loop (and thread) goes away.
922 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
923   delete this;
924 }
925
// Copies every locally buffered event into the main TraceLog buffer, empties
// the local buffer (resize(0) keeps its capacity for reuse), and queues a
// buffer-full notification if the main buffer filled up. Caller must hold
// trace_log_->lock_.
926 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked(
927     NotificationHelper* notifier) {
928   trace_log_->lock_.AssertAcquired();
929   for (size_t i = 0; i < logged_events_.size(); ++i) {
930     trace_log_->AddEventToMainBufferWhileLocked(logged_events_[i]);
931   }
932   logged_events_.resize(0);
933   trace_log_->CheckIfBufferIsFullWhileLocked(notifier);
934 }
935
767 TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log) 936 TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log)
768 : trace_log_(trace_log), 937 : trace_log_(trace_log),
769 notification_(0) { 938 notification_(0) {
770 } 939 }
771 940
772 TraceLog::NotificationHelper::~NotificationHelper() { 941 TraceLog::NotificationHelper::~NotificationHelper() {
773 } 942 }
774 943
775 void TraceLog::NotificationHelper::AddNotificationWhileLocked( 944 void TraceLog::NotificationHelper::AddNotificationWhileLocked(
776 int notification) { 945 int notification) {
946 trace_log_->lock_.AssertAcquired();
777 if (trace_log_->notification_callback_.is_null()) 947 if (trace_log_->notification_callback_.is_null())
778 return; 948 return;
779 if (notification_ == 0) 949 if (notification_ == 0)
780 callback_copy_ = trace_log_->notification_callback_; 950 callback_copy_ = trace_log_->notification_callback_;
781 notification_ |= notification; 951 notification_ |= notification;
782 } 952 }
783 953
784 void TraceLog::NotificationHelper::SendNotificationIfAny() { 954 void TraceLog::NotificationHelper::SendNotificationIfAny() {
785 if (notification_) 955 if (notification_)
786 callback_copy_.Run(notification_); 956 callback_copy_.Run(notification_);
(...skipping 26 matching lines...) Expand all
813 } 983 }
814 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY)) 984 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY))
815 ret |= RECORD_UNTIL_FULL; // Default when no options are specified. 985 ret |= RECORD_UNTIL_FULL; // Default when no options are specified.
816 986
817 return static_cast<Options>(ret); 987 return static_cast<Options>(ret);
818 } 988 }
819 989
820 TraceLog::TraceLog() 990 TraceLog::TraceLog()
821 : enable_count_(0), 991 : enable_count_(0),
822 num_traces_recorded_(0), 992 num_traces_recorded_(0),
823 event_callback_(NULL), 993 buffer_is_full_(0),
994 event_callback_(0),
824 dispatching_to_observer_list_(false), 995 dispatching_to_observer_list_(false),
825 process_sort_index_(0), 996 process_sort_index_(0),
826 watch_category_(NULL), 997 process_id_hash_(0),
998 process_id_(0),
999 watch_category_(0),
827 trace_options_(RECORD_UNTIL_FULL), 1000 trace_options_(RECORD_UNTIL_FULL),
828 sampling_thread_handle_(0), 1001 sampling_thread_handle_(0),
829 category_filter_(CategoryFilter::kDefaultCategoryFilterString) { 1002 category_filter_(CategoryFilter::kDefaultCategoryFilterString),
1003 flush_count_(0) {
830 // Trace is enabled or disabled on one thread while other threads are 1004 // Trace is enabled or disabled on one thread while other threads are
831 // accessing the enabled flag. We don't care whether edge-case events are 1005 // accessing the enabled flag. We don't care whether edge-case events are
832 // traced or not, so we allow races on the enabled flag to keep the trace 1006 // traced or not, so we allow races on the enabled flag to keep the trace
833 // macros fast. 1007 // macros fast.
834 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots: 1008 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
835 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled, 1009 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
836 // sizeof(g_category_group_enabled), 1010 // sizeof(g_category_group_enabled),
837 // "trace_event category enabled"); 1011 // "trace_event category enabled");
838 for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) { 1012 for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
839 ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i], 1013 ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
(...skipping 114 matching lines...) Expand 10 before | Expand all | Expand 10 after
954 category_group_enabled = 1128 category_group_enabled =
955 &g_category_group_enabled[g_category_categories_exhausted]; 1129 &g_category_group_enabled[g_category_categories_exhausted];
956 } 1130 }
957 } 1131 }
958 return category_group_enabled; 1132 return category_group_enabled;
959 } 1133 }
960 1134
961 void TraceLog::GetKnownCategoryGroups( 1135 void TraceLog::GetKnownCategoryGroups(
962 std::vector<std::string>* category_groups) { 1136 std::vector<std::string>* category_groups) {
963 AutoLock lock(lock_); 1137 AutoLock lock(lock_);
1138 category_groups->push_back(
1139 g_category_groups[g_category_trace_event_overhead]);
964 for (int i = g_num_builtin_categories; i < g_category_index; i++) 1140 for (int i = g_num_builtin_categories; i < g_category_index; i++)
965 category_groups->push_back(g_category_groups[i]); 1141 category_groups->push_back(g_category_groups[i]);
966 } 1142 }
967 1143
968 void TraceLog::SetEnabled(const CategoryFilter& category_filter, 1144 void TraceLog::SetEnabled(const CategoryFilter& category_filter,
969 Options options) { 1145 Options options) {
970 std::vector<EnabledStateObserver*> observer_list; 1146 std::vector<EnabledStateObserver*> observer_list;
971 { 1147 {
972 AutoLock lock(lock_); 1148 AutoLock lock(lock_);
973 1149
1150 // Can't enable tracing when Flush() is in progress.
1151 DCHECK(!flush_message_loop_proxy_.get());
1152
1153 Options old_options = trace_options();
1154
974 if (enable_count_++ > 0) { 1155 if (enable_count_++ > 0) {
975 if (options != trace_options_) { 1156 if (options != old_options) {
976 DLOG(ERROR) << "Attemting to re-enable tracing with a different " 1157 DLOG(ERROR) << "Attemting to re-enable tracing with a different "
977 << "set of options."; 1158 << "set of options.";
978 } 1159 }
979 1160
980 category_filter_.Merge(category_filter); 1161 category_filter_.Merge(category_filter);
981 UpdateCategoryGroupEnabledFlags(); 1162 UpdateCategoryGroupEnabledFlags();
982 return; 1163 return;
983 } 1164 }
984 1165
985 if (options != trace_options_) { 1166 if (options != old_options) {
986 trace_options_ = options; 1167 subtle::NoBarrier_Store(&trace_options_, options);
987 logged_events_.reset(GetTraceBuffer()); 1168 logged_events_.reset(GetTraceBuffer());
1169 subtle::NoBarrier_Store(&buffer_is_full_, 0);
988 } 1170 }
989 1171
990 if (dispatching_to_observer_list_) { 1172 if (dispatching_to_observer_list_) {
991 DLOG(ERROR) << 1173 DLOG(ERROR) <<
992 "Cannot manipulate TraceLog::Enabled state from an observer."; 1174 "Cannot manipulate TraceLog::Enabled state from an observer.";
993 return; 1175 return;
994 } 1176 }
995 1177
996 num_traces_recorded_++; 1178 num_traces_recorded_++;
997 1179
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
1035 AutoLock lock(lock_); 1217 AutoLock lock(lock_);
1036 DCHECK(enable_count_ > 0); 1218 DCHECK(enable_count_ > 0);
1037 return category_filter_; 1219 return category_filter_;
1038 } 1220 }
1039 1221
1040 void TraceLog::SetDisabled() { 1222 void TraceLog::SetDisabled() {
1041 std::vector<EnabledStateObserver*> observer_list; 1223 std::vector<EnabledStateObserver*> observer_list;
1042 { 1224 {
1043 AutoLock lock(lock_); 1225 AutoLock lock(lock_);
1044 DCHECK(enable_count_ > 0); 1226 DCHECK(enable_count_ > 0);
1227
1045 if (--enable_count_ != 0) 1228 if (--enable_count_ != 0)
1046 return; 1229 return;
1047 1230
1048 if (dispatching_to_observer_list_) { 1231 if (dispatching_to_observer_list_) {
1049 DLOG(ERROR) 1232 DLOG(ERROR)
1050 << "Cannot manipulate TraceLog::Enabled state from an observer."; 1233 << "Cannot manipulate TraceLog::Enabled state from an observer.";
1051 return; 1234 return;
1052 } 1235 }
1053 1236
1054 if (sampling_thread_.get()) { 1237 if (sampling_thread_.get()) {
1055 // Stop the sampling thread. 1238 // Stop the sampling thread.
1056 sampling_thread_->Stop(); 1239 sampling_thread_->Stop();
1057 lock_.Release(); 1240 lock_.Release();
1058 PlatformThread::Join(sampling_thread_handle_); 1241 PlatformThread::Join(sampling_thread_handle_);
1059 lock_.Acquire(); 1242 lock_.Acquire();
1060 sampling_thread_handle_ = PlatformThreadHandle(); 1243 sampling_thread_handle_ = PlatformThreadHandle();
1061 sampling_thread_.reset(); 1244 sampling_thread_.reset();
1062 } 1245 }
1063 1246
1064 category_filter_.Clear(); 1247 category_filter_.Clear();
1065 watch_category_ = NULL; 1248 subtle::NoBarrier_Store(&watch_category_, 0);
1066 watch_event_name_ = ""; 1249 watch_event_name_ = "";
1067 UpdateCategoryGroupEnabledFlags(); 1250 UpdateCategoryGroupEnabledFlags();
1068 AddMetadataEvents(); 1251 AddMetadataEvents();
1069 1252
1070 dispatching_to_observer_list_ = true; 1253 dispatching_to_observer_list_ = true;
1071 observer_list = enabled_state_observer_list_; 1254 observer_list = enabled_state_observer_list_;
1072 } 1255 }
1073 1256
1074 // Dispatch to observers outside the lock in case the observer triggers a 1257 // Dispatch to observers outside the lock in case the observer triggers a
1075 // trace event. 1258 // trace event.
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after
1115 logged_events_->Capacity()); 1298 logged_events_->Capacity());
1116 } 1299 }
1117 1300
1118 void TraceLog::SetNotificationCallback( 1301 void TraceLog::SetNotificationCallback(
1119 const TraceLog::NotificationCallback& cb) { 1302 const TraceLog::NotificationCallback& cb) {
1120 AutoLock lock(lock_); 1303 AutoLock lock(lock_);
1121 notification_callback_ = cb; 1304 notification_callback_ = cb;
1122 } 1305 }
1123 1306
1124 TraceBuffer* TraceLog::GetTraceBuffer() { 1307 TraceBuffer* TraceLog::GetTraceBuffer() {
1125 if (trace_options_ & RECORD_CONTINUOUSLY) 1308 Options options = trace_options();
1309 if (options & RECORD_CONTINUOUSLY)
1126 return new TraceBufferRingBuffer(); 1310 return new TraceBufferRingBuffer();
1127 else if (trace_options_ & ECHO_TO_CONSOLE) 1311 else if (options & ECHO_TO_CONSOLE)
1128 return new TraceBufferDiscardsEvents(); 1312 return new TraceBufferDiscardsEvents();
1129 return new TraceBufferVector(); 1313 return new TraceBufferVector();
1130 } 1314 }
1131 1315
// Appends one event to the main buffer unconditionally. Caller must hold
// lock_. Used both for direct additions and for draining thread-local
// buffers, which is why the full check is deliberately skipped here.
1316 void TraceLog::AddEventToMainBufferWhileLocked(const TraceEvent& trace_event) {
1317   // Don't check buffer_is_full_ because we want the remaining thread-local
1318   // events to be flushed into the main buffer with this method, otherwise
1319   // we may lose some early events of a thread that generates events sparsely.
1320   lock_.AssertAcquired();
1321   logged_events_->AddEvent(trace_event);
1322 }
1323
// Latches buffer_is_full_ (atomic store so lock-free readers see it) the
// first time the main buffer reports full, and queues a single
// TRACE_BUFFER_FULL notification. Caller must hold lock_.
1324 void TraceLog::CheckIfBufferIsFullWhileLocked(NotificationHelper* notifier) {
1325   lock_.AssertAcquired();
1326   if (!subtle::NoBarrier_Load(&buffer_is_full_) && logged_events_->IsFull()) {
1327     subtle::NoBarrier_Store(&buffer_is_full_,
1328                             static_cast<subtle::AtomicWord>(1));
1329     notifier->AddNotificationWhileLocked(TRACE_BUFFER_FULL);
1330   }
1331 }
1332
1132 void TraceLog::SetEventCallback(EventCallback cb) { 1333 void TraceLog::SetEventCallback(EventCallback cb) {
1133 AutoLock lock(lock_); 1334 subtle::NoBarrier_Store(&event_callback_,
1134 event_callback_ = cb; 1335 reinterpret_cast<subtle::AtomicWord>(cb));
1135 }; 1336 };
1136 1337
1338 // Flush() works as the following:
1339 // 1. Flush() is called in threadA whose message loop is saved in
1340 // flush_message_loop_proxy_;
1341 // 2. If thread_message_loops_ is not empty, threadA posts task to each message
1342 // loop to flush the thread local buffers; otherwise finish the flush;
1343 // 3. FlushCurrentThread() deletes the thread local event buffer:
1344 // - The last batch of events of the thread are flushed into the main buffer;
1345 // - The message loop will be removed from thread_message_loops_;
1346 // If this is the last message loop, finish the flush;
1347 // 4. If any thread hasn't finish its flush in time, finish the flush.
1137 void TraceLog::Flush(const TraceLog::OutputCallback& cb) { 1348 void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
1138 // Ignore memory allocations from here down. 1349 if (IsEnabled()) {
1139 INTERNAL_TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"), 1350 // Can't flush when tracing is enabled because otherwise PostTask would
1140 TRACE_MEMORY_IGNORE); 1351 // - it generates more trace events;
1141 scoped_ptr<TraceBuffer> previous_logged_events; 1352 // - it deschedules the calling thread on some platforms causing inaccurate
1353 // timing of the trace events.
1354 scoped_refptr<RefCountedString> empty_result = new RefCountedString;
1355 if (!cb.is_null())
1356 cb.Run(empty_result, false);
1357 return;
1358 }
1359
1360 int flush_count;
1142 { 1361 {
1143 AutoLock lock(lock_); 1362 AutoLock lock(lock_);
1363 flush_count = ++flush_count_;
1364 DCHECK(!flush_message_loop_proxy_.get());
1365 flush_message_loop_proxy_ = MessageLoopProxy::current();
1366 DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
1367 flush_output_callback_ = cb;
1368
1369 if (thread_message_loops_.size()) {
1370 for (hash_set<MessageLoop*>::const_iterator it =
1371 thread_message_loops_.begin();
1372 it != thread_message_loops_.end(); ++it) {
1373 (*it)->PostTask(
1374 FROM_HERE,
1375 Bind(&TraceLog::FlushCurrentThread, Unretained(this), flush_count));
1376 }
1377 flush_message_loop_proxy_->PostDelayedTask(
1378 FROM_HERE,
1379 Bind(&TraceLog::OnFlushTimeout, Unretained(this), flush_count),
1380 TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
1381 return;
1382 }
1383 }
1384
1385 FinishFlush(flush_count);
1386 }
1387
1388 void TraceLog::FinishFlush(int flush_count) {
1389 scoped_ptr<TraceBuffer> previous_logged_events;
1390 OutputCallback flush_output_callback;
1391
1392 {
1393 AutoLock lock(lock_);
1394 if (flush_count != flush_count_)
1395 return;
1396
1144 previous_logged_events.swap(logged_events_); 1397 previous_logged_events.swap(logged_events_);
1145 logged_events_.reset(GetTraceBuffer()); 1398 logged_events_.reset(GetTraceBuffer());
1146 } // release lock 1399 subtle::NoBarrier_Store(&buffer_is_full_, 0);
1400 flush_message_loop_proxy_ = NULL;
1401 flush_output_callback = flush_output_callback_;
1402 flush_output_callback_.Reset();
1403 }
1147 1404
1148 while (previous_logged_events->HasMoreEvents()) { 1405 if (flush_output_callback.is_null())
1406 return;
1407
1408 bool has_more_events = previous_logged_events->HasMoreEvents();
1409 // The callback need to be called at least once even if there is no events
1410 // to let the caller know the completion of flush.
1411 do {
1149 scoped_refptr<RefCountedString> json_events_str_ptr = 1412 scoped_refptr<RefCountedString> json_events_str_ptr =
1150 new RefCountedString(); 1413 new RefCountedString();
1151 1414
1152 for (size_t i = 0; i < kTraceEventBatchSize; ++i) { 1415 for (size_t i = 0; has_more_events && i < kTraceEventBatchSize; ++i) {
1153 if (i > 0) 1416 if (i > 0)
1154 *(&(json_events_str_ptr->data())) += ","; 1417 *(&(json_events_str_ptr->data())) += ",";
1155 1418
1156 previous_logged_events->NextEvent().AppendAsJSON( 1419 previous_logged_events->NextEvent().AppendAsJSON(
1157 &(json_events_str_ptr->data())); 1420 &(json_events_str_ptr->data()));
1158 1421
1159 if (!previous_logged_events->HasMoreEvents()) 1422 has_more_events = previous_logged_events->HasMoreEvents();
1160 break;
1161 } 1423 }
1162 1424
1163 cb.Run(json_events_str_ptr); 1425 flush_output_callback.Run(json_events_str_ptr, has_more_events);
1426 } while (has_more_events);
1427 }
1428
1429 // Run in each thread holding a local event buffer.
1430 void TraceLog::FlushCurrentThread(int flush_count) {
1431 {
1432 AutoLock lock(lock_);
1433 if (flush_count != flush_count_ || !flush_message_loop_proxy_) {
1434 // This is late. The corresponding flush has finished.
1435 return;
1436 }
1437 }
1438
1439 delete thread_local_event_buffer_.Get();
1440
1441 {
1442 AutoLock lock(lock_);
1443 if (flush_count != flush_count_ || !flush_message_loop_proxy_ ||
1444 thread_message_loops_.size())
1445 return;
1446
1447 flush_message_loop_proxy_->PostTask(
1448 FROM_HERE,
1449 Bind(&TraceLog::FinishFlush, Unretained(this), flush_count));
1164 } 1450 }
1165 } 1451 }
1166 1452
1453 void TraceLog::OnFlushTimeout(int flush_count) {
1454 {
1455 AutoLock lock(lock_);
1456 if (flush_count != flush_count_ || !flush_message_loop_proxy_) {
1457 // Flush has finished before timeout.
1458 return;
1459 }
1460
1461 LOG(WARNING) << thread_message_loops_.size() << " threads haven't finished"
1462 << " flush in time. Discarding remaining events of them";
1463 }
1464 FinishFlush(flush_count);
1465 }
1466
1167 void TraceLog::AddTraceEvent( 1467 void TraceLog::AddTraceEvent(
1168 char phase, 1468 char phase,
1169 const unsigned char* category_group_enabled, 1469 const unsigned char* category_group_enabled,
1170 const char* name, 1470 const char* name,
1171 unsigned long long id, 1471 unsigned long long id,
1172 int num_args, 1472 int num_args,
1173 const char** arg_names, 1473 const char** arg_names,
1174 const unsigned char* arg_types, 1474 const unsigned char* arg_types,
1175 const unsigned long long* arg_values, 1475 const unsigned long long* arg_values,
1176 scoped_ptr<ConvertableToTraceFormat> convertable_values[], 1476 scoped_ptr<ConvertableToTraceFormat> convertable_values[],
(...skipping 27 matching lines...) Expand all
1204 #if defined(OS_ANDROID) 1504 #if defined(OS_ANDROID)
1205 SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id, 1505 SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id,
1206 num_args, arg_names, arg_types, arg_values, convertable_values, 1506 num_args, arg_names, arg_types, arg_values, convertable_values,
1207 flags); 1507 flags);
1208 #endif 1508 #endif
1209 1509
1210 if (!IsCategoryGroupEnabled(category_group_enabled)) 1510 if (!IsCategoryGroupEnabled(category_group_enabled))
1211 return; 1511 return;
1212 1512
1213 TimeTicks now = timestamp - time_offset_; 1513 TimeTicks now = timestamp - time_offset_;
1214 base::TimeTicks thread_now; 1514 TimeTicks thread_now = ThreadNow();
1215 if (base::TimeTicks::IsThreadNowSupported())
1216 thread_now = base::TimeTicks::ThreadNow();
1217 EventCallback event_callback_copy;
1218 1515
1219 NotificationHelper notifier(this); 1516 NotificationHelper notifier(this);
1220 1517
1518 ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
1519 // A ThreadLocalEventBuffer needs the message loop
1520 // - to know when the thread exits;
1521 // - to handle the final flush.
1522 // For a thread without a message loop, the trace events will be added into
1523 // the main buffer directly.
1524 if (MessageLoop::current()) {
1525 thread_local_event_buffer = thread_local_event_buffer_.Get();
1526 if (!thread_local_event_buffer) {
1527 thread_local_event_buffer = new ThreadLocalEventBuffer(this);
1528 thread_local_event_buffer_.Set(thread_local_event_buffer);
1529 }
1530 }
1531
1221 // Check and update the current thread name only if the event is for the 1532 // Check and update the current thread name only if the event is for the
1222 // current thread to avoid locks in most cases. 1533 // current thread to avoid locks in most cases.
1223 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) { 1534 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
1224 const char* new_name = ThreadIdNameManager::GetInstance()-> 1535 const char* new_name = ThreadIdNameManager::GetInstance()->
1225 GetName(thread_id); 1536 GetName(thread_id);
1226 // Check if the thread name has been set or changed since the previous 1537 // Check if the thread name has been set or changed since the previous
1227 // call (if any), but don't bother if the new name is empty. Note this will 1538 // call (if any), but don't bother if the new name is empty. Note this will
1228 // not detect a thread name change within the same char* buffer address: we 1539 // not detect a thread name change within the same char* buffer address: we
1229 // favor common case performance over corner case correctness. 1540 // favor common case performance over corner case correctness.
1230 if (new_name != g_current_thread_name.Get().Get() && 1541 if (new_name != g_current_thread_name.Get().Get() &&
(...skipping 15 matching lines...) Expand all
1246 existing_names.end(), 1557 existing_names.end(),
1247 new_name) != existing_names.end(); 1558 new_name) != existing_names.end();
1248 if (!found) { 1559 if (!found) {
1249 existing_name->second.push_back(','); 1560 existing_name->second.push_back(',');
1250 existing_name->second.append(new_name); 1561 existing_name->second.append(new_name);
1251 } 1562 }
1252 } 1563 }
1253 } 1564 }
1254 } 1565 }
1255 1566
1256 TraceEvent trace_event(thread_id, 1567 if (!subtle::NoBarrier_Load(&buffer_is_full_)) {
1257 now, thread_now, phase, category_group_enabled, name, id, 1568 TraceEvent trace_event(thread_id, now, thread_now, phase,
1258 num_args, arg_names, arg_types, arg_values, 1569 category_group_enabled, name, id,
1259 convertable_values, flags); 1570 num_args, arg_names, arg_types, arg_values,
1571 convertable_values, flags);
1260 1572
1261 do { 1573 if (thread_local_event_buffer) {
1262 AutoLock lock(lock_); 1574 thread_local_event_buffer->AddEvent(trace_event, &notifier);
1575 } else {
1576 AutoLock lock(lock_);
1577 AddEventToMainBufferWhileLocked(trace_event);
1578 CheckIfBufferIsFullWhileLocked(&notifier);
1579 }
1263 1580
1264 event_callback_copy = event_callback_; 1581 if (trace_options() & ECHO_TO_CONSOLE) {
1265 if (logged_events_->IsFull()) 1582 AutoLock lock(lock_);
1266 break;
1267 1583
1268 logged_events_->AddEvent(trace_event);
1269
1270 if (trace_options_ & ECHO_TO_CONSOLE) {
1271 TimeDelta duration; 1584 TimeDelta duration;
1272 if (phase == TRACE_EVENT_PHASE_END) { 1585 if (phase == TRACE_EVENT_PHASE_END) {
1273 duration = timestamp - thread_event_start_times_[thread_id].top(); 1586 duration = timestamp - thread_event_start_times_[thread_id].top();
1274 thread_event_start_times_[thread_id].pop(); 1587 thread_event_start_times_[thread_id].pop();
1275 } 1588 }
1276 1589
1277 std::string thread_name = thread_names_[thread_id]; 1590 std::string thread_name = thread_names_[thread_id];
1278 if (thread_colors_.find(thread_name) == thread_colors_.end()) 1591 if (thread_colors_.find(thread_name) == thread_colors_.end())
1279 thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1; 1592 thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
1280 1593
(...skipping 12 matching lines...) Expand all
1293 1606
1294 trace_event.AppendPrettyPrinted(&log); 1607 trace_event.AppendPrettyPrinted(&log);
1295 if (phase == TRACE_EVENT_PHASE_END) 1608 if (phase == TRACE_EVENT_PHASE_END)
1296 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF()); 1609 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
1297 1610
1298 LOG(ERROR) << log.str() << "\x1b[0;m"; 1611 LOG(ERROR) << log.str() << "\x1b[0;m";
1299 1612
1300 if (phase == TRACE_EVENT_PHASE_BEGIN) 1613 if (phase == TRACE_EVENT_PHASE_BEGIN)
1301 thread_event_start_times_[thread_id].push(timestamp); 1614 thread_event_start_times_[thread_id].push(timestamp);
1302 } 1615 }
1616 }
1303 1617
1304 if (logged_events_->IsFull()) 1618 if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
1305 notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL); 1619 &watch_category_)) == category_group_enabled) {
1306 1620 AutoLock lock(lock_);
1307 if (watch_category_ == category_group_enabled && watch_event_name_ == name) 1621 if (watch_event_name_ == name)
1308 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); 1622 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
1309 } while (0); // release lock 1623 }
1310 1624
1311 notifier.SendNotificationIfAny(); 1625 notifier.SendNotificationIfAny();
1312 if (event_callback_copy != NULL) { 1626 EventCallback event_callback = reinterpret_cast<EventCallback>(
1313 event_callback_copy(phase, category_group_enabled, name, id, 1627 subtle::NoBarrier_Load(&event_callback_));
1314 num_args, arg_names, arg_types, arg_values, 1628 if (event_callback) {
1315 flags); 1629 event_callback(phase, category_group_enabled, name, id,
1630 num_args, arg_names, arg_types, arg_values,
1631 flags);
1316 } 1632 }
1633
1634 if (thread_local_event_buffer)
1635 thread_local_event_buffer->ReportOverhead(now, thread_now);
1317 } 1636 }
1318 1637
1319 void TraceLog::AddTraceEventEtw(char phase, 1638 void TraceLog::AddTraceEventEtw(char phase,
1320 const char* name, 1639 const char* name,
1321 const void* id, 1640 const void* id,
1322 const char* extra) { 1641 const char* extra) {
1323 #if defined(OS_WIN) 1642 #if defined(OS_WIN)
1324 TraceEventETWProvider::Trace(name, phase, id, extra); 1643 TraceEventETWProvider::Trace(name, phase, id, extra);
1325 #endif 1644 #endif
1326 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name, 1645 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
(...skipping 12 matching lines...) Expand all
1339 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); 1658 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
1340 } 1659 }
1341 1660
1342 void TraceLog::SetWatchEvent(const std::string& category_name, 1661 void TraceLog::SetWatchEvent(const std::string& category_name,
1343 const std::string& event_name) { 1662 const std::string& event_name) {
1344 const unsigned char* category = GetCategoryGroupEnabled( 1663 const unsigned char* category = GetCategoryGroupEnabled(
1345 category_name.c_str()); 1664 category_name.c_str());
1346 size_t notify_count = 0; 1665 size_t notify_count = 0;
1347 { 1666 {
1348 AutoLock lock(lock_); 1667 AutoLock lock(lock_);
1349 watch_category_ = category; 1668 subtle::NoBarrier_Store(&watch_category_,
1669 reinterpret_cast<subtle::AtomicWord>(category));
1350 watch_event_name_ = event_name; 1670 watch_event_name_ = event_name;
1351 1671
1352 // First, search existing events for watch event because we want to catch 1672 // First, search existing events for watch event because we want to catch
1353 // it even if it has already occurred. 1673 // it even if it has already occurred.
1354 notify_count = logged_events_->CountEnabledByName(category, event_name); 1674 notify_count = logged_events_->CountEnabledByName(category, event_name);
1355 } // release lock 1675 } // release lock
1356 1676
1357 // Send notification for each event found. 1677 // Send notification for each event found.
1358 for (size_t i = 0; i < notify_count; ++i) { 1678 for (size_t i = 0; i < notify_count; ++i) {
1359 NotificationHelper notifier(this); 1679 NotificationHelper notifier(this);
1360 lock_.Acquire(); 1680 lock_.Acquire();
1361 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); 1681 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
1362 lock_.Release(); 1682 lock_.Release();
1363 notifier.SendNotificationIfAny(); 1683 notifier.SendNotificationIfAny();
1364 } 1684 }
1365 } 1685 }
1366 1686
1367 void TraceLog::CancelWatchEvent() { 1687 void TraceLog::CancelWatchEvent() {
1368 AutoLock lock(lock_); 1688 AutoLock lock(lock_);
1369 watch_category_ = NULL; 1689 subtle::NoBarrier_Store(&watch_category_, 0);
1370 watch_event_name_ = ""; 1690 watch_event_name_ = "";
1371 } 1691 }
1372 1692
1373 namespace { 1693 namespace {
1374 1694
1375 template <typename T> 1695 template <typename T>
1376 void AddMetadataEventToBuffer( 1696 void AddMetadataEventToBuffer(
1377 TraceBuffer* logged_events, 1697 TraceBuffer* logged_events,
1378 int thread_id, 1698 int thread_id,
1379 const char* metadata_name, const char* arg_name, 1699 const char* metadata_name, const char* arg_name,
(...skipping 331 matching lines...) Expand 10 before | Expand all | Expand 10 after
1711 0, // num_args 2031 0, // num_args
1712 NULL, // arg_names 2032 NULL, // arg_names
1713 NULL, // arg_types 2033 NULL, // arg_types
1714 NULL, // arg_values 2034 NULL, // arg_values
1715 NULL, // convertable values 2035 NULL, // convertable values
1716 TRACE_EVENT_FLAG_NONE); // flags 2036 TRACE_EVENT_FLAG_NONE); // flags
1717 } 2037 }
1718 } 2038 }
1719 2039
1720 } // namespace trace_event_internal 2040 } // namespace trace_event_internal
OLDNEW
« no previous file with comments | « base/debug/trace_event_impl.h ('k') | base/debug/trace_event_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698