Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(149)

Side by Side Diff: src/heap-snapshot-generator.cc

Issue 12314027: Split profile-generator (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its 12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived 13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission. 14 // from this software without specific prior written permission.
15 // 15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #include "profile-generator-inl.h" 30 #include "heap-snapshot-generator-inl.h"
31 31
32 #include "global-handles.h"
33 #include "heap-profiler.h" 32 #include "heap-profiler.h"
yurys 2013/02/21 09:07:44 It is already included in "heap-snapshot-generator-inl.h".
34 #include "scopeinfo.h"
35 #include "unicode.h"
36 #include "zone-inl.h"
37 #include "debug.h" 33 #include "debug.h"
38 34
39 namespace v8 { 35 namespace v8 {
40 namespace internal { 36 namespace internal {
41 37
42 38
// Pre-sizes the two parallel lists to 4 entries: token_locations_[i] holds
// the global-handle slot for token i, token_removed_[i] its removal flag.
TokenEnumerator::TokenEnumerator()
    : token_locations_(4),
      token_removed_(4) {
}
47
48
// Destroys all global handles still registered with GetTokenId(). Handles
// already reclaimed through TokenRemovedCallback are flagged in
// token_removed_ and must not be destroyed again.
TokenEnumerator::~TokenEnumerator() {
  Isolate* isolate = Isolate::Current();
  for (int i = 0; i < token_locations_.length(); ++i) {
    if (!token_removed_[i]) {
      // Clear weakness first so Destroy() doesn't race the weak callback.
      isolate->global_handles()->ClearWeakness(token_locations_[i]);
      isolate->global_handles()->Destroy(token_locations_[i]);
    }
  }
}
58
59
// Maps a security token object to a small integer id, interning unseen
// tokens. Returns kNoSecurityToken for NULL; otherwise the index of the
// live location matching |token|, or a freshly appended index. The new
// handle is made weak so TokenRemovedCallback can flag it when the token
// object dies.
int TokenEnumerator::GetTokenId(Object* token) {
  Isolate* isolate = Isolate::Current();
  if (token == NULL) return TokenEnumerator::kNoSecurityToken;
  // Linear scan: the number of distinct security tokens is expected to be
  // small, so no hash map is used.
  for (int i = 0; i < token_locations_.length(); ++i) {
    if (*token_locations_[i] == token && !token_removed_[i]) return i;
  }
  Handle<Object> handle = isolate->global_handles()->Create(token);
  // handle.location() points to a memory cell holding a pointer
  // to a token object in the V8's heap.
  isolate->global_handles()->MakeWeak(handle.location(),
                                      this,
                                      NULL,
                                      TokenRemovedCallback);
  token_locations_.Add(handle.location());
  token_removed_.Add(false);
  return token_locations_.length() - 1;
}
77
78
// Weak-handle callback registered in GetTokenId(); |parameter| is the
// owning TokenEnumerator. Marks the token's slot removed, then disposes
// the now-dead persistent handle.
void TokenEnumerator::TokenRemovedCallback(v8::Isolate* isolate,
                                           v8::Persistent<v8::Value> handle,
                                           void* parameter) {
  reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved(
      Utils::OpenHandle(*handle).location());
  handle.Dispose(isolate);
}
86
87
88 void TokenEnumerator::TokenRemoved(Object** token_location) {
89 for (int i = 0; i < token_locations_.length(); ++i) {
90 if (token_locations_[i] == token_location && !token_removed_[i]) {
91 token_removed_[i] = true;
92 return;
93 }
94 }
95 }
96
97
// names_ maps interned C strings to themselves; StringsMatch compares keys
// byte-wise so equal strings share one stored copy.
StringsStorage::StringsStorage()
    : names_(StringsMatch) {
}
101
102
// The storage owns every interned string (stored as the map's value);
// release them all on destruction.
StringsStorage::~StringsStorage() {
  for (HashMap::Entry* p = names_.Start();
       p != NULL;
       p = names_.Next(p)) {
    DeleteArray(reinterpret_cast<const char*>(p->value));
  }
}
110
111
// Interns a copy of |src| and returns the canonical stored pointer.
// The temporary copy is handed to AddOrDisposeString, which frees it if
// an equal string is already interned.
const char* StringsStorage::GetCopy(const char* src) {
  int len = static_cast<int>(strlen(src));
  Vector<char> dst = Vector<char>::New(len + 1);
  OS::StrNCpy(dst, src, len);
  dst[len] = '\0';  // StrNCpy does not guarantee termination at len.
  uint32_t hash =
      StringHasher::HashSequentialString(dst.start(), len, HEAP->HashSeed());
  return AddOrDisposeString(dst.start(), hash);
}
121
122
// printf-style convenience wrapper over GetVFormatted; returns an interned
// string owned by this storage.
const char* StringsStorage::GetFormatted(const char* format, ...) {
  va_list args;
  va_start(args, format);
  const char* result = GetVFormatted(format, args);
  va_end(args);
  return result;
}
130
131
// Takes ownership of heap-allocated |str|. If no equal string is interned
// yet, |str| becomes the stored copy; otherwise |str| is freed and the
// existing canonical copy is returned.
const char* StringsStorage::AddOrDisposeString(char* str, uint32_t hash) {
  HashMap::Entry* cache_entry = names_.Lookup(str, hash, true);
  if (cache_entry->value == NULL) {
    // New entry added.
    cache_entry->value = str;
  } else {
    DeleteArray(str);
  }
  return reinterpret_cast<const char*>(cache_entry->value);
}
142
143
// Formats into a fixed 1K buffer and interns the result. If formatting
// fails (VSNPrintF returns -1, e.g. on truncation), the buffer is freed
// and the raw |format| pointer is returned as a fallback — callers must
// not free that return value.
const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
  Vector<char> str = Vector<char>::New(1024);
  int len = OS::VSNPrintF(str, format, args);
  if (len == -1) {
    DeleteArray(str.start());
    return format;
  }
  uint32_t hash = StringHasher::HashSequentialString(
      str.start(), len, HEAP->HashSeed());
  return AddOrDisposeString(str.start(), hash);
}
155
156
// Interns the flattened contents of a heap String, truncated to
// kMaxNameSize characters. Returns "" when |name| does not pass the
// IsString() check (the parameter may reference a non-string heap object
// despite its static type — TODO confirm against callers).
const char* StringsStorage::GetName(String* name) {
  if (name->IsString()) {
    int length = Min(kMaxNameSize, name->length());
    SmartArrayPointer<char> data =
        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
    uint32_t hash = StringHasher::HashSequentialString(
        *data, length, name->GetHeap()->HashSeed());
    // Detach(): ownership of the char buffer passes to AddOrDisposeString.
    return AddOrDisposeString(data.Detach(), hash);
  }
  return "";
}
168
169
170 const char* StringsStorage::GetName(int index) {
171 return GetFormatted("%d", index);
172 }
173
174
175 size_t StringsStorage::GetUsedMemorySize() const {
176 size_t size = sizeof(*this);
177 size += sizeof(HashMap::Entry) * names_.capacity();
178 for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
179 size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
180 }
181 return size;
182 }
183
// Shared empty prefix for entries without a name prefix (compared by
// pointer identity in GetCallUid/IsSameAs).
const char* const CodeEntry::kEmptyNamePrefix = "";
185
186
// Copies the identifying fields of |source| into this entry. Note that
// shared_id_ and the security token are NOT copied by this method.
void CodeEntry::CopyData(const CodeEntry& source) {
  tag_ = source.tag_;
  name_prefix_ = source.name_prefix_;
  name_ = source.name_;
  resource_name_ = source.resource_name_;
  line_number_ = source.line_number_;
}
194
195
// Hash that identifies this entry as a call target, consistent with
// IsSameAs(): when shared_id_ is set it alone (with the tag) identifies
// the function; otherwise the identity is the triple of interned string
// pointers plus the line number. String fields hash by POINTER value,
// which is valid because StringsStorage interns equal strings to one
// canonical pointer.
uint32_t CodeEntry::GetCallUid() const {
  uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed);
  if (shared_id_ != 0) {
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(shared_id_),
                               v8::internal::kZeroHashSeed);
  } else {
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
  }
  return hash;
}
215
216
217 bool CodeEntry::IsSameAs(CodeEntry* entry) const {
218 return this == entry
219 || (tag_ == entry->tag_
220 && shared_id_ == entry->shared_id_
221 && (shared_id_ != 0
222 || (name_prefix_ == entry->name_prefix_
223 && name_ == entry->name_
224 && resource_name_ == entry->resource_name_
225 && line_number_ == entry->line_number_)));
226 }
227
228
// Looks up an existing child for |entry| without inserting; returns NULL
// when no such child exists.
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry), false);
  return map_entry != NULL ?
      reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
}
235
236
// Returns the child node for |entry|, creating it on first use. New nodes
// are registered in both children_ (for lookup) and children_list_ (for
// ordered iteration); the two structures must stay in sync.
ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry), true);
  if (map_entry->value == NULL) {
    // New node added.
    ProfileNode* new_node = new ProfileNode(tree_, entry);
    map_entry->value = new_node;
    children_list_.Add(new_node);
  }
  return reinterpret_cast<ProfileNode*>(map_entry->value);
}
248
249
// Self time in milliseconds, using the tree's measured tick rate.
double ProfileNode::GetSelfMillis() const {
  return tree_->TicksToMillis(self_ticks_);
}
253
254
// Total (self + children) time in milliseconds, using the tree's tick rate.
double ProfileNode::GetTotalMillis() const {
  return tree_->TicksToMillis(total_ticks_);
}
258
259
// Debug dump: prints this node's tick counts, name and token id, then
// recurses into children with increased indentation.
void ProfileNode::Print(int indent) {
  OS::Print("%5u %5u %*c %s%s [%d]",
            total_ticks_, self_ticks_,
            indent, ' ',
            entry_->name_prefix(),
            entry_->name(),
            entry_->security_token_id());
  if (entry_->resource_name()[0] != '\0')
    OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  OS::Print("\n");
  for (HashMap::Entry* p = children_.Start();
       p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}
276
277
// TraverseDepthFirst() callback that deletes every node. Deletion happens
// in AfterAllChildrenTraversed (post-order), so children are freed before
// their parent.
class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};
288
289
// The tree owns a synthetic "(root)" entry (stored by value) and the root
// node created for it; all recorded paths hang off root_.
ProfileTree::ProfileTree()
    : root_entry_(Logger::FUNCTION_TAG,
                  "",
                  "(root)",
                  "",
                  0,
                  TokenEnumerator::kNoSecurityToken),
      root_(new ProfileNode(this, &root_entry_)) {
}
299
300
// Frees every node (including root_) via a post-order traversal.
ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}
305
306
307 void ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
308 ProfileNode* node = root_;
309 for (CodeEntry** entry = path.start() + path.length() - 1;
310 entry != path.start() - 1;
311 --entry) {
312 if (*entry != NULL) {
313 node = node->FindOrAddChild(*entry);
314 }
315 }
316 node->IncrementSelfTicks();
317 }
318
319
320 void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
321 ProfileNode* node = root_;
322 for (CodeEntry** entry = path.start();
323 entry != path.start() + path.length();
324 ++entry) {
325 if (*entry != NULL) {
326 node = node->FindOrAddChild(*entry);
327 }
328 }
329 node->IncrementSelfTicks();
330 }
331
332
// (source node, its clone) pair kept on FilteredCloneCallback's stack to
// mirror the traversal position in the destination tree.
struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;   // Node in the tree being cloned.
  ProfileNode* dst;   // Corresponding node in the clone.
};
339
340
// TraverseDepthFirst() callback that copies a tree while dropping nodes
// whose security token is not acceptable for security_token_id_. Ticks of
// a rejected node are attributed to its nearest accepted ancestor. stack_
// mirrors the traversal path: its top is the clone of the deepest ACCEPTED
// ancestor, which is why AfterChildTraversed only pops when the finished
// child is actually on top (rejected children were never pushed).
class FilteredCloneCallback {
 public:
  FilteredCloneCallback(ProfileNode* dst_root, int security_token_id)
      : stack_(10),
        security_token_id_(security_token_id) {
    // Sentinel pair: no source counterpart for the destination root.
    stack_.Add(NodesPair(NULL, dst_root));
  }

  void BeforeTraversingChild(ProfileNode* parent, ProfileNode* child) {
    if (IsTokenAcceptable(child->entry()->security_token_id(),
                          parent->entry()->security_token_id())) {
      ProfileNode* clone = stack_.last().dst->FindOrAddChild(child->entry());
      clone->IncreaseSelfTicks(child->self_ticks());
      stack_.Add(NodesPair(child, clone));
    } else {
      // Attribute ticks to parent node.
      stack_.last().dst->IncreaseSelfTicks(child->self_ticks());
    }
  }

  void AfterAllChildrenTraversed(ProfileNode* parent) { }

  void AfterChildTraversed(ProfileNode*, ProfileNode* child) {
    if (stack_.last().src == child) {
      stack_.RemoveLast();
    }
  }

 private:
  // A token is acceptable if it is absent, matches the filter, or inherits
  // from a parent whose token is absent or matches.
  bool IsTokenAcceptable(int token, int parent_token) {
    if (token == TokenEnumerator::kNoSecurityToken
        || token == security_token_id_) return true;
    if (token == TokenEnumerator::kInheritsSecurityToken) {
      // Inheritance is resolved one level up; chains of "inherits" are not
      // expected here.
      ASSERT(parent_token != TokenEnumerator::kInheritsSecurityToken);
      return parent_token == TokenEnumerator::kNoSecurityToken
          || parent_token == security_token_id_;
    }
    return false;
  }

  List<NodesPair> stack_;
  int security_token_id_;
};
384
// Copies |src| into this (empty) tree, filtering by security token, then
// recomputes aggregated tick counts. The tick-rate scale is carried over
// so millisecond conversions stay comparable.
void ProfileTree::FilteredClone(ProfileTree* src, int security_token_id) {
  ms_to_ticks_scale_ = src->ms_to_ticks_scale_;
  FilteredCloneCallback cb(root_, security_token_id);
  src->TraverseDepthFirst(&cb);
  CalculateTotalTicks();
}
391
392
// Stores the tick→ms conversion factor; a non-positive rate falls back to
// a 1:1 scale to avoid division by zero.
void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
  ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0;
}
396
397
// Traversal cursor for TraverseDepthFirst(): a node plus the index of the
// next child to visit.
class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;
};
414
415
// Non-recursive implementation of a depth-first post-order tree traversal.
// For each child the callback sequence is: BeforeTraversingChild, the
// child's own subtree, AfterAllChildrenTraversed(child), then
// AfterChildTraversed(parent, child). An explicit stack avoids native
// stack overflow on deep profiles.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    Position& current = stack.last();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        // Advance the parent past the child we just finished.
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}
438
439
// TraverseDepthFirst() callback that computes total_ticks for each node as
// self ticks plus the totals of all children (accumulated post-order).
class CalculateTotalTicksCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  void AfterAllChildrenTraversed(ProfileNode* node) {
    node->IncreaseTotalTicks(node->self_ticks());
  }

  void AfterChildTraversed(ProfileNode* parent, ProfileNode* child) {
    parent->IncreaseTotalTicks(child->total_ticks());
  }
};
452
453
// Recomputes aggregated (total) tick counts for the whole tree.
void ProfileTree::CalculateTotalTicks() {
  CalculateTotalTicksCallback cb;
  TraverseDepthFirst(&cb);
}
458
459
// Debug one-liner: root's tick counts and their millisecond equivalents.
void ProfileTree::ShortPrint() {
  OS::Print("root: %u %u %.2fms %.2fms\n",
            root_->total_ticks(), root_->self_ticks(),
            root_->GetTotalMillis(), root_->GetSelfMillis());
}
465
466
// Feeds one sampled stack into both views: the top-down tree consumes the
// path caller-first (from the end), the bottom-up tree callee-first.
void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
  top_down_.AddPathFromEnd(path);
  bottom_up_.AddPathFromStart(path);
}
471
472
// Finalizes aggregated tick counts in both trees (called on profile stop).
void CpuProfile::CalculateTotalTicks() {
  top_down_.CalculateTotalTicks();
  bottom_up_.CalculateTotalTicks();
}
477
478
// Propagates the measured sampling rate to both trees for tick→ms scaling.
void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) {
  top_down_.SetTickRatePerMs(actual_sampling_rate);
  bottom_up_.SetTickRatePerMs(actual_sampling_rate);
}
483
484
// Builds a security-token-filtered copy of this profile (same title/uid).
// Caller takes ownership of the returned profile. Cloning with
// kNoSecurityToken is pointless — the unabridged profile is used directly.
CpuProfile* CpuProfile::FilteredClone(int security_token_id) {
  ASSERT(security_token_id != TokenEnumerator::kNoSecurityToken);
  CpuProfile* clone = new CpuProfile(title_, uid_);
  clone->top_down_.FilteredClone(&top_down_, security_token_id);
  clone->bottom_up_.FilteredClone(&bottom_up_, security_token_id);
  return clone;
}
492
493
// Debug summary: one line per tree.
void CpuProfile::ShortPrint() {
  OS::Print("top down ");
  top_down_.ShortPrint();
  OS::Print("bottom up ");
  bottom_up_.ShortPrint();
}
500
501
// Debug dump of both full trees.
void CpuProfile::Print() {
  OS::Print("[Top down]:\n");
  top_down_.Print();
  OS::Print("[Bottom up]:\n");
  bottom_up_.Print();
}
508
509
// Sentinel entry value marking tree nodes that store a shared-function id
// (see GetSharedId) rather than real code.
CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
512
513
// Registers a code object at [addr, addr+size). Any previously registered
// ranges overlapping the new one are evicted first so lookups stay
// unambiguous.
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  CodeTree::Locator locator;
  tree_.Insert(addr, &locator);
  locator.set_value(CodeEntryInfo(entry, size));
}
520
521
// Removes every mapped range that overlaps [start, end). Walks backwards
// from end-1 using FindGreatestLessThan; keys are collected first and
// removed afterwards, because removing during the walk would invalidate
// the tree iteration.
void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
  List<Address> to_delete;
  Address addr = end - 1;
  while (addr >= start) {
    CodeTree::Locator locator;
    if (!tree_.FindGreatestLessThan(addr, &locator)) break;
    Address start2 = locator.key(), end2 = start2 + locator.value().size;
    // Standard half-open interval overlap test against [start, end).
    if (start2 < end && start < end2) to_delete.Add(start2);
    addr = start2 - 1;
  }
  for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
}
534
535
536 CodeEntry* CodeMap::FindEntry(Address addr) {
537 CodeTree::Locator locator;
538 if (tree_.FindGreatestLessThan(addr, &locator)) {
539 // locator.key() <= addr. Need to check that addr is within entry.
540 const CodeEntryInfo& entry = locator.value();
541 if (addr < (locator.key() + entry.size))
542 return entry.entry;
543 }
544 return NULL;
545 }
546
547
// Returns a stable small id for a shared function at |addr|, assigning the
// next id on first sight. Such nodes are marked with the
// kSharedFunctionCodeEntry sentinel and reuse the 'size' field as the id,
// so they must never collide with real code ranges.
int CodeMap::GetSharedId(Address addr) {
  CodeTree::Locator locator;
  // For shared function entries, 'size' field is used to store their IDs.
  if (tree_.Find(addr, &locator)) {
    const CodeEntryInfo& entry = locator.value();
    ASSERT(entry.entry == kSharedFunctionCodeEntry);
    return entry.size;
  } else {
    tree_.Insert(addr, &locator);
    int id = next_shared_id_++;
    locator.set_value(CodeEntryInfo(kSharedFunctionCodeEntry, id));
    return id;
  }
}
562
563
564 void CodeMap::MoveCode(Address from, Address to) {
565 if (from == to) return;
566 CodeTree::Locator locator;
567 if (!tree_.Find(from, &locator)) return;
568 CodeEntryInfo entry = locator.value();
569 tree_.Remove(from);
570 AddCode(to, entry.entry, entry.size);
571 }
572
573
// ForEach visitor: prints one "address size name" line per mapped range.
void CodeMap::CodeTreePrinter::Call(
    const Address& key, const CodeMap::CodeEntryInfo& value) {
  OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
}
578
579
// Debug dump of every registered code range.
void CodeMap::Print() {
  CodeTreePrinter printer;
  tree_.ForEach(&printer);
}
584
585
// The binary semaphore (initial count 1) acts as a mutex guarding
// current_profiles_ against concurrent access from the sampler.
CpuProfilesCollection::CpuProfilesCollection()
    : profiles_uids_(UidsMatch),
      current_profiles_semaphore_(OS::CreateSemaphore(1)) {
  // Create list of unabridged profiles.
  profiles_by_token_.Add(new List<CpuProfile*>());
}
592
593
// File-local disposal helpers for ~CpuProfilesCollection below.

// Frees one heap-allocated CodeEntry referenced from code_entries_.
static void DeleteCodeEntry(CodeEntry** entry_ptr) {
  delete *entry_ptr;
}

// Frees one heap-allocated CpuProfile referenced from a profiles list.
static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}

// Frees a per-token profiles list and every profile it holds; slots in
// profiles_by_token_ may be NULL when a token's list was never built.
static void DeleteProfilesList(List<CpuProfile*>** list_ptr) {
  if (*list_ptr != NULL) {
    (*list_ptr)->Iterate(DeleteCpuProfile);
    delete *list_ptr;
  }
}
608
// Releases the semaphore and every owned profile and code entry. The
// collection owns in-progress profiles, detached clones, all per-token
// lists, and every CodeEntry it ever created.
CpuProfilesCollection::~CpuProfilesCollection() {
  delete current_profiles_semaphore_;
  current_profiles_.Iterate(DeleteCpuProfile);
  detached_profiles_.Iterate(DeleteCpuProfile);
  profiles_by_token_.Iterate(DeleteProfilesList);
  code_entries_.Iterate(DeleteCodeEntry);
}
616
617
// Begins recording a new profile. Fails (returns false) when the
// simultaneous-profile limit is reached or a profile with the same title
// is already running. The semaphore must be released on every exit path —
// the sampler thread blocks on it in AddPathToCurrentProfiles.
bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid) {
  ASSERT(uid > 0);
  current_profiles_semaphore_->Wait();
  if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_->Signal();
    return false;
  }
  for (int i = 0; i < current_profiles_.length(); ++i) {
    if (strcmp(current_profiles_[i]->title(), title) == 0) {
      // Ignore attempts to start profile with the same title.
      current_profiles_semaphore_->Signal();
      return false;
    }
  }
  current_profiles_.Add(new CpuProfile(title, uid));
  current_profiles_semaphore_->Signal();
  return true;
}
636
637
// Heap-String overload: interns the title and delegates to the char* form.
bool CpuProfilesCollection::StartProfiling(String* title, unsigned uid) {
  return StartProfiling(GetName(title), uid);
}
641
642
// Stops the profile matching |title| (an empty title stops the most
// recently started one), finalizes its tick counts, files it into the
// unabridged list and the uid→index map, and returns the view appropriate
// for |security_token_id|. Returns NULL when no matching profile is
// running. Only list removal happens under the semaphore; finalization is
// done outside the critical section.
CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
                                                 const char* title,
                                                 double actual_sampling_rate) {
  const int title_len = StrLength(title);
  CpuProfile* profile = NULL;
  current_profiles_semaphore_->Wait();
  // Iterate backwards so an empty title picks the newest profile.
  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
      profile = current_profiles_.Remove(i);
      break;
    }
  }
  current_profiles_semaphore_->Signal();

  if (profile != NULL) {
    profile->CalculateTotalTicks();
    profile->SetActualSamplingRate(actual_sampling_rate);
    List<CpuProfile*>* unabridged_list =
        profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
    unabridged_list->Add(profile);
    // Record the profile's index in the unabridged list, keyed by uid.
    HashMap::Entry* entry =
        profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()),
                              static_cast<uint32_t>(profile->uid()),
                              true);
    ASSERT(entry->value == NULL);
    entry->value = reinterpret_cast<void*>(unabridged_list->length() - 1);
    return GetProfile(security_token_id, profile->uid());
  }
  return NULL;
}
673
674
// Returns the profile with |uid| as seen through |security_token_id|.
// The unabridged profile is returned directly for kNoSecurityToken;
// otherwise a token-filtered clone is created lazily and cached in the
// per-token list at the same index. NULL when the uid is unknown.
CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
                                              unsigned uid) {
  int index = GetProfileIndex(uid);
  if (index < 0) return NULL;
  List<CpuProfile*>* unabridged_list =
      profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
  if (security_token_id == TokenEnumerator::kNoSecurityToken) {
    return unabridged_list->at(index);
  }
  List<CpuProfile*>* list = GetProfilesList(security_token_id);
  if (list->at(index) == NULL) {
    // Lazily materialize the filtered clone for this token.
    (*list)[index] =
        unabridged_list->at(index)->FilteredClone(security_token_id);
  }
  return list->at(index);
}
691
692
693 int CpuProfilesCollection::GetProfileIndex(unsigned uid) {
694 HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
695 static_cast<uint32_t>(uid),
696 false);
697 return entry != NULL ?
698 static_cast<int>(reinterpret_cast<intptr_t>(entry->value)) : -1;
699 }
700
701
702 bool CpuProfilesCollection::IsLastProfile(const char* title) {
703 // Called from VM thread, and only it can mutate the list,
704 // so no locking is needed here.
705 if (current_profiles_.length() != 1) return false;
706 return StrLength(title) == 0
707 || strcmp(current_profiles_[0]->title(), title) == 0;
708 }
709
710
// Removes a completed profile from the collection. If the uid is unknown
// the profile is a detached clone and is just dropped from
// detached_profiles_. Otherwise its slot is removed from every per-token
// list, all uid→index mappings above it are shifted down by one, and any
// surviving filtered clones are parked in detached_profiles_ so they can
// still be deleted later. Called from VM thread for a completed profile.
void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  unsigned uid = profile->uid();
  int index = GetProfileIndex(uid);
  if (index < 0) {
    detached_profiles_.RemoveElement(profile);
    return;
  }
  profiles_uids_.Remove(reinterpret_cast<void*>(uid),
                        static_cast<uint32_t>(uid));
  // Decrement all indexes above the deleted one.
  for (HashMap::Entry* p = profiles_uids_.Start();
       p != NULL;
       p = profiles_uids_.Next(p)) {
    intptr_t p_index = reinterpret_cast<intptr_t>(p->value);
    if (p_index > index) {
      p->value = reinterpret_cast<void*>(p_index - 1);
    }
  }
  for (int i = 0; i < profiles_by_token_.length(); ++i) {
    List<CpuProfile*>* list = profiles_by_token_[i];
    if (list != NULL && index < list->length()) {
      // Move all filtered clones into detached_profiles_,
      // so we can know that they are still in use.
      CpuProfile* cloned_profile = list->Remove(index);
      if (cloned_profile != NULL && cloned_profile != profile) {
        detached_profiles_.Add(cloned_profile);
      }
    }
  }
}
742
743
// Maps a security token id to its profiles_by_token_ slot; relies on
// kNoSecurityToken being -1 so the unabridged list sits at slot 0.
int CpuProfilesCollection::TokenToIndex(int security_token_id) {
  ASSERT(TokenEnumerator::kNoSecurityToken == -1);
  return security_token_id + 1;  // kNoSecurityToken -> 0, 0 -> 1, ...
}
748
749
// Returns the per-token profiles list, creating it on demand and padding
// it with NULLs up to the unabridged list's length so that the same index
// addresses the same profile in every list. NULL slots mean "filtered
// clone not materialized yet" (see GetProfile/Profiles).
List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
    int security_token_id) {
  const int index = TokenToIndex(security_token_id);
  // Grow the outer list so slot |index| exists.
  const int lists_to_add = index - profiles_by_token_.length() + 1;
  if (lists_to_add > 0) profiles_by_token_.AddBlock(NULL, lists_to_add);
  List<CpuProfile*>* unabridged_list =
      profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
  const int current_count = unabridged_list->length();
  if (profiles_by_token_[index] == NULL) {
    profiles_by_token_[index] = new List<CpuProfile*>(current_count);
  }
  List<CpuProfile*>* list = profiles_by_token_[index];
  const int profiles_to_add = current_count - list->length();
  if (profiles_to_add > 0) list->AddBlock(NULL, profiles_to_add);
  return list;
}
766
767
// Returns all completed profiles visible to |security_token_id|: the
// unabridged list for kNoSecurityToken, otherwise the per-token list with
// every not-yet-materialized filtered clone filled in.
List<CpuProfile*>* CpuProfilesCollection::Profiles(int security_token_id) {
  List<CpuProfile*>* unabridged_list =
      profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
  if (security_token_id == TokenEnumerator::kNoSecurityToken) {
    return unabridged_list;
  }
  List<CpuProfile*>* list = GetProfilesList(security_token_id);
  const int current_count = unabridged_list->length();
  for (int i = 0; i < current_count; ++i) {
    if (list->at(i) == NULL) {
      (*list)[i] = unabridged_list->at(i)->FilteredClone(security_token_id);
    }
  }
  return list;
}
783
784
// Creates a code entry for a named function with a source location; the
// collection keeps ownership via code_entries_.
CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
                                               String* name,
                                               String* resource_name,
                                               int line_number) {
  CodeEntry* entry = new CodeEntry(tag,
                                   CodeEntry::kEmptyNamePrefix,
                                   GetFunctionName(name),
                                   GetName(resource_name),
                                   line_number,
                                   TokenEnumerator::kNoSecurityToken);
  code_entries_.Add(entry);
  return entry;
}
798
799
// Creates a code entry for a C-string-named function without source
// location info; owned by code_entries_.
CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
                                               const char* name) {
  CodeEntry* entry = new CodeEntry(tag,
                                   CodeEntry::kEmptyNamePrefix,
                                   GetFunctionName(name),
                                   "",
                                   v8::CpuProfileNode::kNoLineNumberInfo,
                                   TokenEnumerator::kNoSecurityToken);
  code_entries_.Add(entry);
  return entry;
}
811
812
// Creates a prefixed code entry (e.g. builtin/stub naming) that inherits
// its security token from its caller; owned by code_entries_.
CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
                                               const char* name_prefix,
                                               String* name) {
  CodeEntry* entry = new CodeEntry(tag,
                                   name_prefix,
                                   GetName(name),
                                   "",
                                   v8::CpuProfileNode::kNoLineNumberInfo,
                                   TokenEnumerator::kInheritsSecurityToken);
  code_entries_.Add(entry);
  return entry;
}
825
826
// Creates an "args_count: N" entry for argument-count-keyed code; inherits
// its security token from its caller; owned by code_entries_.
CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
                                               int args_count) {
  CodeEntry* entry = new CodeEntry(tag,
                                   "args_count: ",
                                   GetName(args_count),
                                   "",
                                   v8::CpuProfileNode::kNoLineNumberInfo,
                                   TokenEnumerator::kInheritsSecurityToken);
  code_entries_.Add(entry);
  return entry;
}
838
839
// Feeds one sampled stack into every profile currently being recorded.
// Runs on the sampler's processing path, guarded by the same semaphore
// that Start/StopProfiling take.
void CpuProfilesCollection::AddPathToCurrentProfiles(
    const Vector<CodeEntry*>& path) {
  // As starting / stopping profiles is rare relatively to this
  // method, we don't bother minimizing the duration of lock holding,
  // e.g. copying contents of the list to a local vector.
  current_profiles_semaphore_->Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(path);
  }
  current_profiles_semaphore_->Signal();
}
851
852
// Called once per sample; queries wall time only every
// wall_time_query_countdown_ ticks to keep the common path cheap.
void SampleRateCalculator::Tick() {
  if (--wall_time_query_countdown_ == 0)
    UpdateMeasurements(OS::TimeCurrentMillis());
}
857
858
// Folds a new wall-clock measurement into the running average of the
// sampling rate. The first call only records the timestamp (there is no
// interval to measure yet).
void SampleRateCalculator::UpdateMeasurements(double current_time) {
  if (measurements_count_++ != 0) {
    // Ticks issued during the elapsed wall interval, expressed per ms.
    const double measured_ticks_per_ms =
        (kWallTimeQueryIntervalMs * ticks_per_ms_) /
        (current_time - last_wall_time_);
    // Update the average value.
    ticks_per_ms_ +=
        (measured_ticks_per_ms - ticks_per_ms_) / measurements_count_;
    // Update the externally accessible result.
    result_ = static_cast<AtomicWord>(ticks_per_ms_ * kResultScale);
  }
  last_wall_time_ = current_time;
  // Re-arm the countdown so the next query lands ~kWallTimeQueryIntervalMs
  // later at the current estimated tick rate.
  wall_time_query_countdown_ =
      static_cast<unsigned>(kWallTimeQueryIntervalMs * ticks_per_ms_);
}
874
875
// Synthetic entry names surfaced in profiles for frames that have no real
// function identity.
const char* const ProfileGenerator::kAnonymousFunctionName =
    "(anonymous function)";
const char* const ProfileGenerator::kProgramEntryName =
    "(program)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
    "(garbage collector)";
882
883
// Pre-creates the shared "(program)" and "(garbage collector)" entries;
// |profiles| retains ownership of both.
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles),
      program_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
      gc_entry_(
          profiles->NewCodeEntry(Logger::BUILTIN_TAG,
                                 kGarbageCollectorEntryName)) {
}
892
893
// Symbolizes one tick sample into a NULL-padded CodeEntry vector and
// records it in all active profiles. |entry| is a bump pointer into
// |entries|; slots left NULL represent frames that could not be resolved
// and are skipped by the profile trees.
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  // Allocate space for stack frames + pc + function + vm-state.
  ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
  // As actual number of decoded code entries may vary, initialize
  // entries vector with NULL values.
  CodeEntry** entry = entries.start();
  memset(entry, 0, entries.length() * sizeof(*entry));
  if (sample.pc != NULL) {
    *entry++ = code_map_.FindEntry(sample.pc);

    if (sample.has_external_callback) {
      // Don't use PC when in external callback code, as it can point
      // inside callback's code, and we will erroneously report
      // that a callback calls itself.
      *(entries.start()) = NULL;
      *entry++ = code_map_.FindEntry(sample.external_callback);
    } else if (sample.tos != NULL) {
      // Find out, if top of stack was pointing inside a JS function
      // meaning that we have encountered a frameless invocation.
      *entry = code_map_.FindEntry(sample.tos);
      if (*entry != NULL && !(*entry)->is_js_function()) {
        *entry = NULL;
      }
      entry++;
    }

    for (const Address* stack_pos = sample.stack,
           *stack_end = stack_pos + sample.frames_count;
         stack_pos != stack_end;
         ++stack_pos) {
      *entry++ = code_map_.FindEntry(*stack_pos);
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (CodeEntry** e = entries.start(); e != entry; ++e) {
      if (*e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      *entry++ = EntryForVMState(sample.state);
    }
  }

  profiles_->AddPathToCurrentProfiles(entries);
}
944
945
// Constructs a named snapshot edge; from/to are indices into the
// snapshot's node array, not pointers. The ASSERT restricts this
// constructor to the name-carrying edge types — index-carrying types
// presumably use a different constructor (not visible here; confirm).
HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
    : type_(type),
      from_index_(from),
      to_index_(to),
      name_(name) {
  ASSERT(type == kContextVariable
      || type == kProperty
      || type == kInternal
      || type == kShortcut);
}
(...skipping 2633 matching lines...) Expand 10 before | Expand all | Expand 10 after
3589 2682
3590 2683
// Collects pointers to all of |map|'s live entries into |sorted_entries|
// and orders them by entry value via SortUsingEntryValue.
void HeapSnapshotJSONSerializer::SortHashMap(
    HashMap* map, List<HashMap::Entry*>* sorted_entries) {
  for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
    sorted_entries->Add(p);
  sorted_entries->Sort(SortUsingEntryValue);
}
3597 2690
3598 } } // namespace v8::internal 2691 } } // namespace v8::internal
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698