OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/debug/trace_event.h" | 5 #include "base/debug/trace_event.h" |
6 | 6 |
7 #include <algorithm> | |
8 | |
9 #include "base/bind.h" | |
10 #include "base/file_util.h" | |
11 #include "base/format_macros.h" | |
12 #include "base/lazy_instance.h" | |
13 #include "base/memory/singleton.h" | |
14 #include "base/process_util.h" | |
15 #include "base/stringprintf.h" | |
16 #include "base/string_tokenizer.h" | |
17 #include "base/threading/platform_thread.h" | |
18 #include "base/threading/thread_local.h" | |
19 #include "base/utf_string_conversions.h" | |
20 #include "base/stl_util.h" | |
21 #include "base/sys_info.h" | |
22 #include "base/time.h" | |
23 | |
24 #if defined(OS_WIN) | |
25 #include "base/debug/trace_event_win.h" | |
26 #endif | |
27 | |
// Test-only helper that destroys the TraceLog singleton (invoked via
// TraceLog::DeleteForTesting() below). The instance lives in static memory
// (StaticMemorySingletonTraits), so OnExit(0) is how it is torn down.
class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<base::debug::TraceLog,
              StaticMemorySingletonTraits<base::debug::TraceLog> >::OnExit(0);
  }
};
35 | |
36 namespace base { | |
37 namespace debug { | |
38 | |
// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceEventBufferSize = 500000;
// Number of events serialized per output chunk during Flush().
const size_t kTraceEventBatchSize = 1000;

// Upper bound on distinct category names. Once exhausted, new categories all
// map to the "categories exhausted" sentinel (see GetCategoryEnabledInternal).
#define TRACE_EVENT_MAX_CATEGORIES 100
namespace {

// Parallel arrays g_categories and g_category_enabled are separate so that
// a pointer to a member of g_category_enabled can be easily converted to an
// index into g_categories. This allows macros to deal only with char enabled
// pointers from g_category_enabled, and we can convert internally to determine
// the category name from the char enabled pointer.
const char* g_categories[TRACE_EVENT_MAX_CATEGORIES] = {
  "tracing already shutdown",
  "tracing categories exhausted; must increase TRACE_EVENT_MAX_CATEGORIES",
  "__metadata",
};
// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_enabled[TRACE_EVENT_MAX_CATEGORIES] = { 0 };
// Indices of the three reserved sentinel categories above.
const int g_category_already_shutdown = 0;
const int g_category_categories_exhausted = 1;
const int g_category_metadata = 2;
// Next free slot in g_categories; written under TraceLog::lock_ in
// GetCategoryEnabledInternal().
int g_category_index = 3; // skip initial 3 categories

// The most-recently captured name of the current thread, used in
// AddTraceEvent() to detect thread-name changes cheaply.
LazyInstance<ThreadLocalPointer<const char>,
             LeakyLazyInstanceTraits<ThreadLocalPointer<const char> > >
    g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
69 | |
70 void AppendValueAsJSON(unsigned char type, | |
71 TraceEvent::TraceValue value, | |
72 std::string* out) { | |
73 std::string::size_type start_pos; | |
74 switch (type) { | |
75 case TRACE_VALUE_TYPE_BOOL: | |
76 *out += value.as_bool ? "true" : "false"; | |
77 break; | |
78 case TRACE_VALUE_TYPE_UINT: | |
79 StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint)); | |
80 break; | |
81 case TRACE_VALUE_TYPE_INT: | |
82 StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int)); | |
83 break; | |
84 case TRACE_VALUE_TYPE_DOUBLE: | |
85 StringAppendF(out, "%f", value.as_double); | |
86 break; | |
87 case TRACE_VALUE_TYPE_POINTER: | |
88 // JSON only supports double and int numbers. | |
89 // So as not to lose bits from a 64-bit pointer, output as a hex string. | |
90 StringAppendF(out, "\"%" PRIx64 "\"", static_cast<uint64>( | |
91 reinterpret_cast<intptr_t>( | |
92 value.as_pointer))); | |
93 break; | |
94 case TRACE_VALUE_TYPE_STRING: | |
95 case TRACE_VALUE_TYPE_COPY_STRING: | |
96 *out += "\""; | |
97 start_pos = out->size(); | |
98 *out += value.as_string ? value.as_string : "NULL"; | |
99 // insert backslash before special characters for proper json format. | |
100 while ((start_pos = out->find_first_of("\\\"", start_pos)) != | |
101 std::string::npos) { | |
102 out->insert(start_pos, 1, '\\'); | |
103 // skip inserted escape character and following character. | |
104 start_pos += 2; | |
105 } | |
106 *out += "\""; | |
107 break; | |
108 default: | |
109 NOTREACHED() << "Don't know how to print this value"; | |
110 break; | |
111 } | |
112 } | |
113 | |
114 } // namespace | |
115 | |
116 //////////////////////////////////////////////////////////////////////////////// | |
117 // | |
118 // TraceEvent | |
119 // | |
120 //////////////////////////////////////////////////////////////////////////////// | |
121 | |
122 namespace { | |
123 | |
// Bytes needed to store |str| including its NUL terminator; 0 for NULL.
size_t GetAllocLength(const char* str) {
  if (!str)
    return 0;
  return strlen(str) + 1;
}
125 | |
126 // Copies |*member| into |*buffer|, sets |*member| to point to this new | |
127 // location, and then advances |*buffer| by the amount written. | |
128 void CopyTraceEventParameter(char** buffer, | |
129 const char** member, | |
130 const char* end) { | |
131 if (*member) { | |
132 size_t written = strlcpy(*buffer, *member, end - *buffer) + 1; | |
133 DCHECK_LE(static_cast<int>(written), end - *buffer); | |
134 *member = *buffer; | |
135 *buffer += written; | |
136 } | |
137 } | |
138 | |
139 } // namespace | |
140 | |
// Default constructor: an empty event with no category or name. arg_names_
// slots are NULLed because a NULL name terminates the argument scan in
// AppendAsJSON(). (Both slots set explicitly — assumes kTraceMaxNumArgs == 2;
// NOTE(review): verify against the header if the constant changes.)
TraceEvent::TraceEvent()
    : id_(0u),
      category_enabled_(NULL),
      name_(NULL),
      thread_id_(0),
      phase_(TRACE_EVENT_PHASE_BEGIN),
      flags_(0) {
  arg_names_[0] = NULL;
  arg_names_[1] = NULL;
}
151 | |
// Constructs a fully-specified event captured at |timestamp| on |thread_id|.
// When TRACE_EVENT_FLAG_COPY is set, |name|, the argument names and any
// string argument values are copied into a single refcounted heap block so
// the event stays valid after the caller's strings are gone.
TraceEvent::TraceEvent(int thread_id,
                       TimeTicks timestamp,
                       char phase,
                       const unsigned char* category_enabled,
                       const char* name,
                       unsigned long long id,
                       int num_args,
                       const char** arg_names,
                       const unsigned char* arg_types,
                       const unsigned long long* arg_values,
                       unsigned char flags)
    : timestamp_(timestamp),
      id_(id),
      category_enabled_(category_enabled),
      name_(name),
      thread_id_(thread_id),
      phase_(phase),
      flags_(flags) {
  // Clamp num_args since it may have been set by a third_party library.
  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
  int i = 0;
  for (; i < num_args; ++i) {
    arg_names_[i] = arg_names[i];
    arg_values_[i].as_uint = arg_values[i];
    arg_types_[i] = arg_types[i];
  }
  // NULL-fill the unused slots; a NULL argument name terminates the scan in
  // AppendAsJSON().
  for (; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = NULL;
    arg_values_[i].as_uint = 0u;
    arg_types_[i] = TRACE_VALUE_TYPE_UINT;
  }

  // First pass: measure how much storage the copies need. STRING argument
  // values are promoted to COPY_STRING when copying so the value-copy pass
  // below picks them up too.
  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
  size_t alloc_size = 0;
  if (copy) {
    alloc_size += GetAllocLength(name);
    for (i = 0; i < num_args; ++i) {
      alloc_size += GetAllocLength(arg_names_[i]);
      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
    }
  }

  bool arg_is_copy[kTraceMaxNumArgs];
  for (i = 0; i < num_args; ++i) {
    // We only take a copy of arg_vals if they are of type COPY_STRING.
    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
    if (arg_is_copy[i])
      alloc_size += GetAllocLength(arg_values_[i].as_string);
  }

  // Second pass: copy everything into one contiguous refcounted buffer and
  // repoint the members at the copies. CopyTraceEventParameter advances |ptr|
  // as it writes; the DCHECK verifies both passes agreed on the total size.
  if (alloc_size) {
    parameter_copy_storage_ = new base::RefCountedString;
    parameter_copy_storage_->data().resize(alloc_size);
    char* ptr = string_as_array(&parameter_copy_storage_->data());
    const char* end = ptr + alloc_size;
    if (copy) {
      CopyTraceEventParameter(&ptr, &name_, end);
      for (i = 0; i < num_args; ++i)
        CopyTraceEventParameter(&ptr, &arg_names_[i], end);
    }
    for (i = 0; i < num_args; ++i) {
      if (arg_is_copy[i])
        CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    }
    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
  }
}
220 | |
// Out-of-line destructor; members clean up via their own destructors
// (notably the refcounted parameter_copy_storage_ allocated above).
TraceEvent::~TraceEvent() {
}
223 | |
224 void TraceEvent::AppendEventsAsJSON(const std::vector<TraceEvent>& events, | |
225 size_t start, | |
226 size_t count, | |
227 std::string* out) { | |
228 for (size_t i = 0; i < count && start + i < events.size(); ++i) { | |
229 if (i > 0) | |
230 *out += ","; | |
231 events[i + start].AppendAsJSON(out); | |
232 } | |
233 } | |
234 | |
// Serializes this event as one JSON object:
// {"cat":..,"pid":..,"tid":..,"ts":..,"ph":..,"name":..,"args":{..}[,"id":..]}
void TraceEvent::AppendAsJSON(std::string* out) const {
  int64 time_int64 = timestamp_.ToInternalValue();
  int process_id = TraceLog::GetInstance()->process_id();
  // Category name checked at category creation time.
  DCHECK(!strchr(name_, '"'));
  StringAppendF(out,
      "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
      "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
      TraceLog::GetCategoryName(category_enabled_),
      process_id,
      thread_id_,
      time_int64,
      phase_,
      name_);

  // Output argument names and values, stop at first NULL argument name.
  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    if (i > 0)
      *out += ",";
    *out += "\"";
    *out += arg_names_[i];
    *out += "\":";
    AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
  }
  *out += "}";

  // If id_ is set, print it out as a hex string so we don't lose any
  // bits (it might be a 64-bit pointer).
  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    StringAppendF(out, ",\"id\":\"%" PRIx64 "\"", static_cast<uint64>(id_));
  *out += "}";
}
267 | |
268 //////////////////////////////////////////////////////////////////////////////// | |
269 // | |
270 // TraceResultBuffer | |
271 // | |
272 //////////////////////////////////////////////////////////////////////////////// | |
273 | |
// Returns a callback bound to this SimpleOutput. Uses Unretained, so the
// SimpleOutput must outlive every Run() of the returned callback.
TraceResultBuffer::OutputCallback
    TraceResultBuffer::SimpleOutput::GetCallback() {
  return base::Bind(&SimpleOutput::Append, base::Unretained(this));
}

// Accumulates each JSON chunk into json_output.
void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
}

TraceResultBuffer::~TraceResultBuffer() {
}

// Sets the sink that receives each JSON chunk emitted by Start(),
// AddFragment() and Finish(). Set this before calling Start().
void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

// Opens the JSON array and resets comma state for a fresh trace.
void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

// Emits |trace_fragment|, preceded by a comma for every fragment after the
// first, keeping the overall output a valid JSON array.
void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

// Closes the JSON array opened by Start().
void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}
310 | |
311 //////////////////////////////////////////////////////////////////////////////// | |
312 // | |
313 // TraceLog | |
314 // | |
315 //////////////////////////////////////////////////////////////////////////////// | |
316 | |
// static
// Returns the process-wide TraceLog. StaticMemorySingletonTraits keeps the
// instance in static storage; after shutdown get() returns NULL, which
// GetCategoryEnabled() below handles explicitly.
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, StaticMemorySingletonTraits<TraceLog> >::get();
}

// Tracing starts disabled; the process id (and its hash) is captured once.
TraceLog::TraceLog()
    : enabled_(false) {
  SetProcessID(static_cast<int>(base::GetCurrentProcId()));
}

TraceLog::~TraceLog() {
}
329 | |
330 const unsigned char* TraceLog::GetCategoryEnabled(const char* name) { | |
331 TraceLog* tracelog = GetInstance(); | |
332 if (!tracelog) { | |
333 DCHECK(!g_category_enabled[g_category_already_shutdown]); | |
334 return &g_category_enabled[g_category_already_shutdown]; | |
335 } | |
336 return tracelog->GetCategoryEnabledInternal(name); | |
337 } | |
338 | |
// Maps a pointer into g_category_enabled back to the category's name by
// recovering the array index with pointer arithmetic — the two arrays are
// parallel (see the comment on g_categories).
const char* TraceLog::GetCategoryName(const unsigned char* category_enabled) {
  // Calculate the index of the category by finding category_enabled in
  // g_category_enabled array.
  uintptr_t category_begin = reinterpret_cast<uintptr_t>(g_category_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(g_category_enabled +
                                                    TRACE_EVENT_MAX_CATEGORIES)) <<
      "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_enabled[0]);
  return g_categories[category_index];
}
352 | |
353 static void EnableMatchingCategory(int category_index, | |
354 const std::vector<std::string>& patterns, | |
355 unsigned char is_included) { | |
356 std::vector<std::string>::const_iterator ci = patterns.begin(); | |
357 bool is_match = false; | |
358 for (; ci != patterns.end(); ++ci) { | |
359 is_match = MatchPattern(g_categories[category_index], ci->c_str()); | |
360 if (is_match) | |
361 break; | |
362 } | |
363 ANNOTATE_BENIGN_RACE(&g_category_enabled[category_index], | |
364 "trace_event category enabled"); | |
365 g_category_enabled[category_index] = is_match ? | |
366 is_included : (is_included ^ 1); | |
367 } | |
368 | |
// Enable/disable each category based on the category filters in |patterns|.
// If the category name matches one of the patterns, its enabled status is set
// to |is_included|. Otherwise its enabled status is set to !|is_included|.
// Called with TraceLog::lock_ held (see SetEnabled), which guards
// g_category_index.
static void EnableMatchingCategories(const std::vector<std::string>& patterns,
                                     unsigned char is_included) {
  for (int i = 0; i < g_category_index; i++)
    EnableMatchingCategory(i, patterns, is_included);
}
377 | |
// Returns a stable pointer to the enabled flag for |name|, registering the
// category on first use. Once the table is full, every new name maps to the
// shared "categories exhausted" sentinel slot.
const unsigned char* TraceLog::GetCategoryEnabledInternal(const char* name) {
  AutoLock lock(lock_);
  DCHECK(!strchr(name, '"')) << "Category names may not contain double quote";

  // Search for pre-existing category matching this name
  for (int i = 0; i < g_category_index; i++) {
    if (strcmp(g_categories[i], name) == 0)
      return &g_category_enabled[i];
  }

  // Create a new category
  DCHECK(g_category_index < TRACE_EVENT_MAX_CATEGORIES) <<
      "must increase TRACE_EVENT_MAX_CATEGORIES";
  if (g_category_index < TRACE_EVENT_MAX_CATEGORIES) {
    int new_index = g_category_index++;
    // NOTE(review): |name| is stored by pointer, so callers must pass a
    // string with static storage duration (presumably literals from the
    // trace macros — verify).
    g_categories[new_index] = name;
    DCHECK(!g_category_enabled[new_index]);
    if (enabled_) {
      // Note that if both included and excluded_categories are empty, the else
      // clause below excludes nothing, thereby enabling this category.
      if (!included_categories_.empty())
        EnableMatchingCategory(new_index, included_categories_, 1);
      else
        EnableMatchingCategory(new_index, excluded_categories_, 0);
    } else {
      ANNOTATE_BENIGN_RACE(&g_category_enabled[new_index],
                           "trace_event category enabled");
      g_category_enabled[new_index] = 0;
    }
    return &g_category_enabled[new_index];
  } else {
    return &g_category_enabled[g_category_categories_exhausted];
  }
}
412 | |
// Appends every registered category name to |categories|.
void TraceLog::GetKnownCategories(std::vector<std::string>* categories) {
  AutoLock lock(lock_);
  for (int i = 0; i < g_category_index; i++)
    categories->push_back(g_categories[i]);
}

// Turns tracing on with the given include/exclude pattern lists. No-op when
// already enabled — the filters in effect stay until SetDisabled().
void TraceLog::SetEnabled(const std::vector<std::string>& included_categories,
                          const std::vector<std::string>& excluded_categories) {
  AutoLock lock(lock_);
  if (enabled_)
    return;
  logged_events_.reserve(1024);  // Pre-size to limit early reallocations.
  enabled_ = true;
  included_categories_ = included_categories;
  excluded_categories_ = excluded_categories;
  // Note that if both included and excluded_categories are empty, the else
  // clause below excludes nothing, thereby enabling all categories.
  if (!included_categories_.empty())
    EnableMatchingCategories(included_categories_, 1);
  else
    EnableMatchingCategories(excluded_categories_, 0);
}
435 | |
436 void TraceLog::SetEnabled(const std::string& categories) { | |
437 std::vector<std::string> included, excluded; | |
438 // Tokenize list of categories, delimited by ','. | |
439 StringTokenizer tokens(categories, ","); | |
440 while (tokens.GetNext()) { | |
441 bool is_included = true; | |
442 std::string category = tokens.token(); | |
443 // Excluded categories start with '-'. | |
444 if (category.at(0) == '-') { | |
445 // Remove '-' from category string. | |
446 category = category.substr(1); | |
447 is_included = false; | |
448 } | |
449 if (is_included) | |
450 included.push_back(category); | |
451 else | |
452 excluded.push_back(category); | |
453 } | |
454 SetEnabled(included, excluded); | |
455 } | |
456 | |
// Returns the filters passed to SetEnabled(). Both outputs are left
// untouched when tracing is off.
void TraceLog::GetEnabledTraceCategories(
    std::vector<std::string>* included_out,
    std::vector<std::string>* excluded_out) {
  AutoLock lock(lock_);
  if (enabled_) {
    *included_out = included_categories_;
    *excluded_out = excluded_categories_;
  }
}

// Stops tracing: clears every category flag and the stored filters, appends
// the deferred metadata events (thread names, clock sync), then flushes the
// buffer — Flush() runs outside the lock because it re-acquires it.
void TraceLog::SetDisabled() {
  {
    AutoLock lock(lock_);
    if (!enabled_)
      return;

    enabled_ = false;
    included_categories_.clear();
    excluded_categories_.clear();
    for (int i = 0; i < g_category_index; i++)
      g_category_enabled[i] = 0;
    AddThreadNameMetadataEvents();
    AddClockSyncMetadataEvents();
  } // release lock
  Flush();
}

// Convenience toggle: enable with empty filters (all categories) or disable.
void TraceLog::SetEnabled(bool enabled) {
  if (enabled)
    SetEnabled(std::vector<std::string>(), std::vector<std::string>());
  else
    SetDisabled();
}
490 | |
491 float TraceLog::GetBufferPercentFull() const { | |
492 return (float)((double)logged_events_.size()/(double)kTraceEventBufferSize); | |
493 } | |
494 | |
// Registers the sink that Flush() sends serialized JSON batches to.
void TraceLog::SetOutputCallback(const TraceLog::OutputCallback& cb) {
  AutoLock lock(lock_);
  output_callback_ = cb;
}

// Registers the callback invoked (outside the lock, see AddTraceEvent) when
// the event buffer reaches kTraceEventBufferSize.
void TraceLog::SetBufferFullCallback(const TraceLog::BufferFullCallback& cb) {
  AutoLock lock(lock_);
  buffer_full_callback_ = cb;
}

// Drains logged_events_ and serializes them to the output callback in
// kTraceEventBatchSize chunks. The buffer is swapped out under the lock so
// serialization and the callback run without holding it.
void TraceLog::Flush() {
  std::vector<TraceEvent> previous_logged_events;
  OutputCallback output_callback_copy;
  {
    AutoLock lock(lock_);
    previous_logged_events.swap(logged_events_);
    output_callback_copy = output_callback_;
  } // release lock

  // Without a registered callback the drained events are simply discarded.
  if (output_callback_copy.is_null())
    return;

  for (size_t i = 0;
       i < previous_logged_events.size();
       i += kTraceEventBatchSize) {
    scoped_refptr<RefCountedString> json_events_str_ptr =
        new RefCountedString();
    TraceEvent::AppendEventsAsJSON(previous_logged_events,
                                   i,
                                   kTraceEventBatchSize,
                                   &(json_events_str_ptr->data));
    output_callback_copy.Run(json_events_str_ptr);
  }
}
529 | |
// Records one trace event into logged_events_. Returns the stored event's
// index (callers pass it back as |threshold_begin_id| on the matching END
// event) or -1 when the event is dropped: disabled category, full buffer,
// flushed-away BEGIN, or an under-|threshold| BEGIN/END pair. Also maintains
// the thread-id -> thread-name map, and fires the buffer-full callback after
// releasing the lock when the buffer just filled up.
int TraceLog::AddTraceEvent(char phase,
                            const unsigned char* category_enabled,
                            const char* name,
                            unsigned long long id,
                            int num_args,
                            const char** arg_names,
                            const unsigned char* arg_types,
                            const unsigned long long* arg_values,
                            int threshold_begin_id,
                            long long threshold,
                            unsigned char flags) {
  DCHECK(name);
  // Timestamp captured before acquiring the lock.
  TimeTicks now = TimeTicks::HighResNow();
  BufferFullCallback buffer_full_callback_copy;
  int ret_begin_id = -1;
  {
    AutoLock lock(lock_);
    if (!*category_enabled)
      return -1;
    // Drop (rather than flush) once the in-memory buffer is exhausted.
    if (logged_events_.size() >= kTraceEventBufferSize)
      return -1;

    int thread_id = static_cast<int>(PlatformThread::CurrentId());

    const char* new_name = PlatformThread::GetName();
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() &&
        new_name && *new_name) {
      g_current_thread_name.Get().Set(new_name);
      base::hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name. Accumulate distinct names as a comma-separated list.
        std::vector<base::StringPiece> existing_names;
        Tokenize(existing_name->second, ",", &existing_names);
        bool found = std::find(existing_names.begin(),
                               existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }

    if (threshold_begin_id > -1) {
      DCHECK(phase == TRACE_EVENT_PHASE_END);
      size_t begin_i = static_cast<size_t>(threshold_begin_id);
      // Return now if there has been a flush since the begin event was posted.
      if (begin_i >= logged_events_.size())
        return -1;
      // Determine whether to drop the begin/end pair.
      TimeDelta elapsed = now - logged_events_[begin_i].timestamp();
      if (elapsed < TimeDelta::FromMicroseconds(threshold)) {
        // Remove begin event and do not add end event.
        // This will be expensive if there have been other events in the
        // mean time (should be rare).
        logged_events_.erase(logged_events_.begin() + begin_i);
        return -1;
      }
    }
    ret_begin_id = static_cast<int>(logged_events_.size());
    logged_events_.push_back(
        TraceEvent(thread_id,
                   now, phase, category_enabled, name, id,
                   num_args, arg_names, arg_types, arg_values,
                   flags));

    // Capture the callback exactly once — on the event that fills the
    // buffer — and run it below, after the lock is released.
    if (logged_events_.size() == kTraceEventBufferSize) {
      buffer_full_callback_copy = buffer_full_callback_;
    }
  } // release lock

  if (!buffer_full_callback_copy.is_null())
    buffer_full_callback_copy.Run();

  return ret_begin_id;
}
615 | |
// Forwards an ETW-style event (C-string payload) to the Windows ETW provider
// and mirrors it into the regular trace buffer on every platform.
void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

// Overload for std::string payloads; otherwise identical to the above.
void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const std::string& extra)
{
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

// Emits a COUNTER-phase event carrying one or two named integer samples.
// |value2_name| may be NULL, in which case num_args is 1 and the second
// sample is never stored (converting it below is harmless).
void TraceLog::AddCounterEvent(const unsigned char* category_enabled,
                               const char* name,
                               unsigned long long id,
                               const char* value1_name, int value1_val,
                               const char* value2_name, int value2_val,
                               unsigned char flags) {
  int num_args = value2_name ? 2 : 1;
  const char* arg_names[2] = {value1_name, value2_name};
  unsigned char arg_types[2];
  unsigned long long arg_values[2];
  trace_event_internal::SetTraceValue(value1_val, &arg_types[0],
                                      &arg_values[0]);
  trace_event_internal::SetTraceValue(value2_val, &arg_types[1],
                                      &arg_values[1]);
  AddTraceEvent(TRACE_EVENT_PHASE_COUNTER,
                category_enabled,
                name,
                id,
                num_args,
                arg_names,
                arg_types,
                arg_values,
                trace_event_internal::kNoThreshholdBeginId,
                trace_event_internal::kNoThresholdValue,
                flags);
}
665 | |
666 void TraceLog::AddClockSyncMetadataEvents() { | |
667 #if defined(OS_ANDROID) | |
668 // Since Android does not support sched_setaffinity, we cannot establish clock | |
669 // sync unless the scheduler clock is set to global. If the trace_clock file | |
670 // can't be read, we will assume the kernel doesn't support tracing and do | |
671 // nothing. | |
672 std::string clock_mode; | |
673 if (!file_util::ReadFileToString( | |
674 FilePath("/sys/kernel/debug/tracing/trace_clock"), | |
675 &clock_mode)) | |
676 return; | |
677 | |
678 if (clock_mode != "local [global]\n") { | |
679 DLOG(WARNING) << | |
680 "The kernel's tracing clock must be set to global in order for " << | |
681 "trace_event to be synchronized with . Do this by\n" << | |
682 " echo global > /sys/kerel/debug/tracing/trace_clock"; | |
683 return; | |
684 } | |
685 | |
686 // Android's kernel trace system has a trace_marker feature: this is a file on | |
687 // debugfs that takes the written data and pushes it onto the trace | |
688 // buffer. So, to establish clock sync, we write our monotonic clock into that | |
689 // trace buffer. | |
690 TimeTicks now = TimeTicks::HighResNow(); | |
691 | |
692 double now_in_seconds = now.ToInternalValue() / 1000000.0; | |
693 std::string marker = | |
694 StringPrintf("trace_event_clock_sync: parent_ts=%f\n", | |
695 now_in_seconds); | |
696 if (file_util::WriteFile( | |
697 FilePath("/sys/kernel/debug/tracing/trace_marker"), | |
698 marker.c_str(), marker.size()) == -1) { | |
699 DLOG(WARNING) << "Couldn't write to /sys/kernel/debug/tracing/trace_marker"; | |
700 return; | |
701 } | |
702 #endif | |
703 } | |
704 | |
705 void TraceLog::AddThreadNameMetadataEvents() { | |
706 lock_.AssertAcquired(); | |
707 for(base::hash_map<int, std::string>::iterator it = thread_names_.begin(); | |
708 it != thread_names_.end(); | |
709 it++) { | |
710 if (!it->second.empty()) { | |
711 int num_args = 1; | |
712 const char* arg_name = "name"; | |
713 unsigned char arg_type; | |
714 unsigned long long arg_value; | |
715 trace_event_internal::SetTraceValue(it->second, &arg_type, &arg_value); | |
716 logged_events_.push_back( | |
717 TraceEvent(it->first, | |
718 TimeTicks(), TRACE_EVENT_PHASE_METADATA, | |
719 &g_category_enabled[g_category_metadata], | |
720 "thread_name", trace_event_internal::kNoEventId, | |
721 num_args, &arg_name, &arg_type, &arg_value, | |
722 TRACE_EVENT_FLAG_NONE)); | |
723 } | |
724 } | |
725 } | |
726 | |
// Destroys the singleton so tests can restart tracing from a clean state.
void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}

// Re-arms the static-memory singleton after DeleteForTesting().
void TraceLog::Resurrect() {
  StaticMemorySingletonTraits<TraceLog>::Resurrect();
}

// Records the current process id and derives an FNV-style hash of it,
// stored in process_id_hash_ for later use.
void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create a FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}
744 | |
745 } // namespace debug | |
746 } // namespace base | |
747 | |
748 namespace trace_event_internal { | 7 namespace trace_event_internal { |
749 | 8 |
// Stashes the category/name needed by the matching END event and sets
// p_data_ non-NULL — the flag AddEventIfEnabled() reads through.
void TraceEndOnScopeClose::Initialize(const unsigned char* category_enabled,
                                      const char* name) {
  data_.category_enabled = category_enabled;
  data_.name = name;
  p_data_ = &data_;
}
756 | 15 |
757 void TraceEndOnScopeClose::AddEventIfEnabled() { | 16 void TraceEndOnScopeClose::AddEventIfEnabled() { |
(...skipping 27 matching lines...) Expand all Loading... |
785 TRACE_EVENT_PHASE_END, | 44 TRACE_EVENT_PHASE_END, |
786 p_data_->category_enabled, | 45 p_data_->category_enabled, |
787 p_data_->name, kNoEventId, | 46 p_data_->name, kNoEventId, |
788 kZeroNumArgs, NULL, NULL, NULL, | 47 kZeroNumArgs, NULL, NULL, NULL, |
789 p_data_->threshold_begin_id, p_data_->threshold, | 48 p_data_->threshold_begin_id, p_data_->threshold, |
790 TRACE_EVENT_FLAG_NONE); | 49 TRACE_EVENT_FLAG_NONE); |
791 } | 50 } |
792 } | 51 } |
793 | 52 |
794 } // namespace trace_event_internal | 53 } // namespace trace_event_internal |
OLD | NEW |