OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 946 matching lines...)
957 } | 957 } |
958 | 958 |
959 | 959 |
960 const int HeapEntry::kNoEntry = -1; | 960 const int HeapEntry::kNoEntry = -1; |
961 | 961 |
962 HeapEntry::HeapEntry(HeapSnapshot* snapshot, | 962 HeapEntry::HeapEntry(HeapSnapshot* snapshot, |
963 Type type, | 963 Type type, |
964 const char* name, | 964 const char* name, |
965 SnapshotObjectId id, | 965 SnapshotObjectId id, |
966 int self_size) | 966 int self_size) |
967 : painted_(false), | 967 : type_(type), |
968 user_reachable_(false), | |
969 dominator_(kNoEntry), | |
970 type_(type), | |
971 retainers_count_(0), | |
972 retainers_index_(-1), | |
973 children_count_(0), | 968 children_count_(0), |
974 children_index_(-1), | 969 children_index_(-1), |
975 self_size_(self_size), | 970 self_size_(self_size), |
976 retained_size_(0), | |
977 id_(id), | 971 id_(id), |
978 snapshot_(snapshot), | 972 snapshot_(snapshot), |
979 name_(name) { } | 973 name_(name) { } |
980 | 974 |
981 | 975 |
982 void HeapEntry::SetNamedReference(HeapGraphEdge::Type type, | 976 void HeapEntry::SetNamedReference(HeapGraphEdge::Type type, |
983 const char* name, | 977 const char* name, |
984 HeapEntry* entry) { | 978 HeapEntry* entry) { |
985 HeapGraphEdge edge(type, name, this->index(), entry->index()); | 979 HeapGraphEdge edge(type, name, this->index(), entry->index()); |
986 snapshot_->edges().Add(edge); | 980 snapshot_->edges().Add(edge); |
987 ++children_count_; | 981 ++children_count_; |
988 ++entry->retainers_count_; | |
989 } | 982 } |
990 | 983 |
991 | 984 |
992 void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type, | 985 void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type, |
993 int index, | 986 int index, |
994 HeapEntry* entry) { | 987 HeapEntry* entry) { |
995 HeapGraphEdge edge(type, index, this->index(), entry->index()); | 988 HeapGraphEdge edge(type, index, this->index(), entry->index()); |
996 snapshot_->edges().Add(edge); | 989 snapshot_->edges().Add(edge); |
997 ++children_count_; | 990 ++children_count_; |
998 ++entry->retainers_count_; | |
999 } | 991 } |
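Note: Both setters only append the edge and bump the source's child count; the dropped ++entry->retainers_count_ lines reflect that per-entry retainer lists are no longer materialized now that dominator and retained-size computation is gone. Edge endpoints are recorded as entry indices rather than pointers because entries_ can still grow (and reallocate) while references are being filled; the index is only swapped for a pointer later, by ReplaceToIndexWithEntry in FillChildren. A minimal sketch of that fix-up, assuming HeapGraphEdge keeps its target in a to_index_/to_entry_ union (the field names are assumptions, not taken from this CL):

    // Hedged sketch: cache the pointer once entries_ stops moving.
    void HeapGraphEdge::ReplaceToIndexWithEntry(HeapSnapshot* snapshot) {
      to_entry_ = &snapshot->entries()[to_index_];
    }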
1000 | 992 |
1001 | 993 |
1002 Handle<HeapObject> HeapEntry::GetHeapObject() { | 994 Handle<HeapObject> HeapEntry::GetHeapObject() { |
1003 return snapshot_->collection()->FindHeapObjectById(id()); | 995 return snapshot_->collection()->FindHeapObjectById(id()); |
1004 } | 996 } |
1005 | 997 |
1006 | 998 |
1007 void HeapEntry::Print( | 999 void HeapEntry::Print( |
1008 const char* prefix, const char* edge_name, int max_depth, int indent) { | 1000 const char* prefix, const char* edge_name, int max_depth, int indent) { |
1009 STATIC_CHECK(sizeof(unsigned) == sizeof(id())); | 1001 STATIC_CHECK(sizeof(unsigned) == sizeof(id())); |
1010 OS::Print("%6d %7d @%6u %*c %s%s: ", | 1002 OS::Print("%6d @%6u %*c %s%s: ", |
1011 self_size(), retained_size(), id(), | 1003 self_size(), id(), indent, ' ', prefix, edge_name); |
1012 indent, ' ', prefix, edge_name); | |
1013 if (type() != kString) { | 1004 if (type() != kString) { |
1014 OS::Print("%s %.40s\n", TypeAsString(), name_); | 1005 OS::Print("%s %.40s\n", TypeAsString(), name_); |
1015 } else { | 1006 } else { |
1016 OS::Print("\""); | 1007 OS::Print("\""); |
1017 const char* c = name_; | 1008 const char* c = name_; |
1018 while (*c && (c - name_) <= 40) { | 1009 while (*c && (c - name_) <= 40) { |
1019 if (*c != '\n') | 1010 if (*c != '\n') |
1020 OS::Print("%c", *c); | 1011 OS::Print("%c", *c); |
1021 else | 1012 else |
1022 OS::Print("\\n"); | 1013 OS::Print("\\n"); |
(...skipping 61 matching lines...)
1084 | 1075 |
1085 | 1076 |
1086 // It is very important to keep objects that form a heap snapshot | 1077 // It is very important to keep objects that form a heap snapshot |
1087 // as small as possible. | 1078 // as small as possible. |
1088 namespace { // Avoid littering the global namespace. | 1079 namespace { // Avoid littering the global namespace. |
1089 | 1080 |
1090 template <size_t ptr_size> struct SnapshotSizeConstants; | 1081 template <size_t ptr_size> struct SnapshotSizeConstants; |
1091 | 1082 |
1092 template <> struct SnapshotSizeConstants<4> { | 1083 template <> struct SnapshotSizeConstants<4> { |
1093 static const int kExpectedHeapGraphEdgeSize = 12; | 1084 static const int kExpectedHeapGraphEdgeSize = 12; |
1094 static const int kExpectedHeapEntrySize = 40; | 1085 static const int kExpectedHeapEntrySize = 24; |
1095 static const size_t kMaxSerializableSnapshotRawSize = 256 * MB; | 1086 static const size_t kMaxSerializableSnapshotRawSize = 256 * MB; |
1096 }; | 1087 }; |
1097 | 1088 |
1098 template <> struct SnapshotSizeConstants<8> { | 1089 template <> struct SnapshotSizeConstants<8> { |
1099 static const int kExpectedHeapGraphEdgeSize = 24; | 1090 static const int kExpectedHeapGraphEdgeSize = 24; |
1100 static const int kExpectedHeapEntrySize = 48; | 1091 static const int kExpectedHeapEntrySize = 32; |
1101 static const uint64_t kMaxSerializableSnapshotRawSize = | 1092 static const uint64_t kMaxSerializableSnapshotRawSize = |
1102 static_cast<uint64_t>(6000) * MB; | 1093 static_cast<uint64_t>(6000) * MB; |
1103 }; | 1094 }; |
1104 | 1095 |
1105 } // namespace | 1096 } // namespace |
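Note: These constants pin the per-record footprint at compile time. With painted_, user_reachable_, dominator_, retainers_count_, retainers_index_ and retained_size_ dropped from HeapEntry (see the constructor hunk above), the expected size falls from 40 to 24 bytes on 32-bit targets and from 48 to 32 bytes on 64-bit ones. A sketch of the guard such constants typically feed; the actual check site sits in a skipped hunk, so its exact shape here is an assumption (STATIC_CHECK itself is V8's static assertion macro, already used in HeapEntry::Print above):

    // Hedged sketch of the compile-time size guard.
    STATIC_CHECK(sizeof(HeapGraphEdge) ==
        SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
    STATIC_CHECK(sizeof(HeapEntry) ==
        SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);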
1106 | 1097 |
1107 HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection, | 1098 HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection, |
1108 HeapSnapshot::Type type, | 1099 HeapSnapshot::Type type, |
1109 const char* title, | 1100 const char* title, |
1110 unsigned uid) | 1101 unsigned uid) |
(...skipping 21 matching lines...)
1132 collection_->RemoveSnapshot(this); | 1123 collection_->RemoveSnapshot(this); |
1133 delete this; | 1124 delete this; |
1134 } | 1125 } |
1135 | 1126 |
1136 | 1127 |
1137 void HeapSnapshot::RememberLastJSObjectId() { | 1128 void HeapSnapshot::RememberLastJSObjectId() { |
1138 max_snapshot_js_object_id_ = collection_->last_assigned_id(); | 1129 max_snapshot_js_object_id_ = collection_->last_assigned_id(); |
1139 } | 1130 } |
1140 | 1131 |
1141 | 1132 |
1142 static void HeapEntryClearPaint(HeapEntry* entry_ptr) { | |
1143 entry_ptr->clear_paint(); | |
1144 } | |
1145 | |
1146 | |
1147 void HeapSnapshot::ClearPaint() { | |
1148 entries_.Iterate(HeapEntryClearPaint); | |
1149 } | |
1150 | |
1151 | |
1152 HeapEntry* HeapSnapshot::AddRootEntry() { | 1133 HeapEntry* HeapSnapshot::AddRootEntry() { |
1153 ASSERT(root_index_ == HeapEntry::kNoEntry); | 1134 ASSERT(root_index_ == HeapEntry::kNoEntry); |
1154 ASSERT(entries_.is_empty()); // Root entry must be the first one. | 1135 ASSERT(entries_.is_empty()); // Root entry must be the first one. |
1155 HeapEntry* entry = AddEntry(HeapEntry::kObject, | 1136 HeapEntry* entry = AddEntry(HeapEntry::kObject, |
1156 "", | 1137 "", |
1157 HeapObjectsMap::kInternalRootObjectId, | 1138 HeapObjectsMap::kInternalRootObjectId, |
1158 0); | 1139 0); |
1159 root_index_ = entry->index(); | 1140 root_index_ = entry->index(); |
1160 ASSERT(root_index_ == 0); | 1141 ASSERT(root_index_ == 0); |
1161 return entry; | 1142 return entry; |
(...skipping 27 matching lines...)
1189 HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type, | 1170 HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type, |
1190 const char* name, | 1171 const char* name, |
1191 SnapshotObjectId id, | 1172 SnapshotObjectId id, |
1192 int size) { | 1173 int size) { |
1193 HeapEntry entry(this, type, name, id, size); | 1174 HeapEntry entry(this, type, name, id, size); |
1194 entries_.Add(entry); | 1175 entries_.Add(entry); |
1195 return &entries_.last(); | 1176 return &entries_.last(); |
1196 } | 1177 } |
1197 | 1178 |
1198 | 1179 |
1199 void HeapSnapshot::FillChildrenAndRetainers() { | 1180 void HeapSnapshot::FillChildren() { |
1200 ASSERT(children().is_empty()); | 1181 ASSERT(children().is_empty()); |
1201 children().Allocate(edges().length()); | 1182 children().Allocate(edges().length()); |
1202 ASSERT(retainers().is_empty()); | |
1203 retainers().Allocate(edges().length()); | |
1204 int children_index = 0; | 1183 int children_index = 0; |
1205 int retainers_index = 0; | |
1206 for (int i = 0; i < entries().length(); ++i) { | 1184 for (int i = 0; i < entries().length(); ++i) { |
1207 HeapEntry* entry = &entries()[i]; | 1185 HeapEntry* entry = &entries()[i]; |
1208 children_index = entry->set_children_index(children_index); | 1186 children_index = entry->set_children_index(children_index); |
1209 retainers_index = entry->set_retainers_index(retainers_index); | |
1210 } | 1187 } |
1211 ASSERT(edges().length() == children_index); | 1188 ASSERT(edges().length() == children_index); |
1212 ASSERT(edges().length() == retainers_index); | |
1213 for (int i = 0; i < edges().length(); ++i) { | 1189 for (int i = 0; i < edges().length(); ++i) { |
1214 HeapGraphEdge* edge = &edges()[i]; | 1190 HeapGraphEdge* edge = &edges()[i]; |
1215 edge->ReplaceToIndexWithEntry(this); | 1191 edge->ReplaceToIndexWithEntry(this); |
1216 edge->from()->add_child(edge); | 1192 edge->from()->add_child(edge); |
1217 edge->to()->add_retainer(edge); | |
1218 } | 1193 } |
1219 } | 1194 } |
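Note: FillChildren (formerly FillChildrenAndRetainers) lays every child edge out in one flat array, CSR-style: the first loop hands each entry a contiguous slice of children() sized by its children_count_, and the second loop resolves each edge's to-index into a pointer and appends the edge to its from-entry's slice. A sketch of the two HeapEntry helpers this leans on, inferred from the call sites above (the cursor trick is an assumption):

    // Hedged sketch of the helpers used by FillChildren.
    int HeapEntry::set_children_index(int index) {
      children_index_ = index;            // start of this entry's slice
      int next_index = index + children_count_;
      children_count_ = 0;                // reused as a write cursor below
      return next_index;                  // where the next slice starts
    }

    void HeapEntry::add_child(HeapGraphEdge* edge) {
      snapshot_->children()[children_index_ + children_count_++] = edge;
    }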
1220 | 1195 |
1221 | |
1222 void HeapSnapshot::SetDominatorsToSelf() { | |
1223 for (int i = 0; i < entries_.length(); ++i) { | |
1224 entries_[i].set_dominator(&entries_[i]); | |
1225 } | |
1226 } | |
1227 | |
1228 | 1196 |
1229 class FindEntryById { | 1197 class FindEntryById { |
1230 public: | 1198 public: |
1231 explicit FindEntryById(SnapshotObjectId id) : id_(id) { } | 1199 explicit FindEntryById(SnapshotObjectId id) : id_(id) { } |
1232 int operator()(HeapEntry* const* entry) { | 1200 int operator()(HeapEntry* const* entry) { |
1233 if ((*entry)->id() == id_) return 0; | 1201 if ((*entry)->id() == id_) return 0; |
1234 return (*entry)->id() < id_ ? -1 : 1; | 1202 return (*entry)->id() < id_ ? -1 : 1; |
1235 } | 1203 } |
1236 private: | 1204 private: |
1237 SnapshotObjectId id_; | 1205 SnapshotObjectId id_; |
(...skipping 39 matching lines...)
1277 static size_t GetMemoryUsedByList(const List<T, P>& list) { | 1245 static size_t GetMemoryUsedByList(const List<T, P>& list) { |
1278 return list.capacity() * sizeof(T); | 1246 return list.capacity() * sizeof(T); |
1279 } | 1247 } |
1280 | 1248 |
1281 | 1249 |
1282 size_t HeapSnapshot::RawSnapshotSize() const { | 1250 size_t HeapSnapshot::RawSnapshotSize() const { |
1283 return | 1251 return |
1284 GetMemoryUsedByList(entries_) + | 1252 GetMemoryUsedByList(entries_) + |
1285 GetMemoryUsedByList(edges_) + | 1253 GetMemoryUsedByList(edges_) + |
1286 GetMemoryUsedByList(children_) + | 1254 GetMemoryUsedByList(children_) + |
1287 GetMemoryUsedByList(retainers_) + | |
1288 GetMemoryUsedByList(sorted_entries_); | 1255 GetMemoryUsedByList(sorted_entries_); |
1289 } | 1256 } |
1290 | 1257 |
1291 | 1258 |
1292 // We split IDs on evens for embedder objects (see | 1259 // We split IDs on evens for embedder objects (see |
1293 // HeapObjectsMap::GenerateId) and odds for native objects. | 1260 // HeapObjectsMap::GenerateId) and odds for native objects. |
1294 const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1; | 1261 const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1; |
1295 const SnapshotObjectId HeapObjectsMap::kGcRootsObjectId = | 1262 const SnapshotObjectId HeapObjectsMap::kGcRootsObjectId = |
1296 HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep; | 1263 HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep; |
1297 const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId = | 1264 const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId = |
(...skipping 1786 matching lines...)
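Note: On the object-id constants just before the skipped hunk: assuming kObjectIdStep == 2, which the parity split implies, the reserved ids step through the odd numbers, and successive ids keep their parity, so the two id spaces can never collide:

    // Reading aid (derived from the visible definitions only):
    //   kInternalRootObjectId == 1
    //   kGcRootsObjectId      == kInternalRootObjectId + kObjectIdStep == 3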
3084 #endif | 3051 #endif |
3085 | 3052 |
3086 SetProgressTotal(1); // 1 pass. | 3053 SetProgressTotal(1); // 1 pass. |
3087 | 3054 |
3088 #ifdef DEBUG | 3055 #ifdef DEBUG |
3089 debug_heap->Verify(); | 3056 debug_heap->Verify(); |
3090 #endif | 3057 #endif |
3091 | 3058 |
3092 if (!FillReferences()) return false; | 3059 if (!FillReferences()) return false; |
3093 | 3060 |
3094 snapshot_->FillChildrenAndRetainers(); | 3061 snapshot_->FillChildren(); |
3095 snapshot_->RememberLastJSObjectId(); | 3062 snapshot_->RememberLastJSObjectId(); |
3096 | 3063 |
3097 if (!SetEntriesDominators()) return false; | |
3098 if (!CalculateRetainedSizes()) return false; | |
3099 | |
3100 progress_counter_ = progress_total_; | 3064 progress_counter_ = progress_total_; |
3101 if (!ProgressReport(true)) return false; | 3065 if (!ProgressReport(true)) return false; |
3102 return true; | 3066 return true; |
3103 } | 3067 } |
3104 | 3068 |
3105 | 3069 |
3106 void HeapSnapshotGenerator::ProgressStep() { | 3070 void HeapSnapshotGenerator::ProgressStep() { |
3107 ++progress_counter_; | 3071 ++progress_counter_; |
3108 } | 3072 } |
3109 | 3073 |
(...skipping 21 matching lines...)
3131 | 3095 |
3132 | 3096 |
3133 bool HeapSnapshotGenerator::FillReferences() { | 3097 bool HeapSnapshotGenerator::FillReferences() { |
3134 SnapshotFiller filler(snapshot_, &entries_); | 3098 SnapshotFiller filler(snapshot_, &entries_); |
3135 v8_heap_explorer_.AddRootEntries(&filler); | 3099 v8_heap_explorer_.AddRootEntries(&filler); |
3136 return v8_heap_explorer_.IterateAndExtractReferences(&filler) | 3100 return v8_heap_explorer_.IterateAndExtractReferences(&filler) |
3137 && dom_explorer_.IterateAndExtractReferences(&filler); | 3101 && dom_explorer_.IterateAndExtractReferences(&filler); |
3138 } | 3102 } |
3139 | 3103 |
3140 | 3104 |
3141 bool HeapSnapshotGenerator::IsUserGlobalReference(const HeapGraphEdge* edge) { | |
3142 ASSERT(edge->from() == snapshot_->root()); | |
3143 return edge->type() == HeapGraphEdge::kShortcut; | |
3144 } | |
3145 | |
3146 | |
3147 void HeapSnapshotGenerator::MarkUserReachableObjects() { | |
3148 List<HeapEntry*> worklist; | |
3149 | |
3150 Vector<HeapGraphEdge*> children = snapshot_->root()->children(); | |
3151 for (int i = 0; i < children.length(); ++i) { | |
3152 if (IsUserGlobalReference(children[i])) { | |
3153 worklist.Add(children[i]->to()); | |
3154 } | |
3155 } | |
3156 | |
3157 while (!worklist.is_empty()) { | |
3158 HeapEntry* entry = worklist.RemoveLast(); | |
3159 if (entry->user_reachable()) continue; | |
3160 entry->set_user_reachable(); | |
3161 Vector<HeapGraphEdge*> children = entry->children(); | |
3162 for (int i = 0; i < children.length(); ++i) { | |
3163 HeapEntry* child = children[i]->to(); | |
3164 if (!child->user_reachable()) { | |
3165 worklist.Add(child); | |
3166 } | |
3167 } | |
3168 } | |
3169 } | |
3170 | |
3171 | |
3172 static bool IsRetainingEdge(HeapGraphEdge* edge) { | |
3173 if (edge->type() == HeapGraphEdge::kShortcut) return false; | |
3174 // The edge is not retaining if it goes from system domain | |
3175 // (i.e. an object not reachable from window) to the user domain | |
3176 // (i.e. a reachable object). | |
3177 return edge->from()->user_reachable() | |
3178 || !edge->to()->user_reachable(); | |
3179 } | |
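Note: IsRetainingEdge is easiest to read by enumerating the four reachability combinations; the only non-retaining case is a system-to-user edge, and shortcut edges are ruled out up front:

    // Reading aid for a non-shortcut edge:
    //   from user,   to user   -> true   (retaining)
    //   from user,   to system -> true   (retaining)
    //   from system, to system -> true   (retaining)
    //   from system, to user   -> false  (a system object must not retain
    //                                     a user-reachable one)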
3180 | |
3181 | |
3182 void HeapSnapshotGenerator::FillPostorderIndexes( | |
3183 Vector<HeapEntry*>* entries) { | |
3184 snapshot_->ClearPaint(); | |
3185 int current_entry = 0; | |
3186 List<HeapEntry*> nodes_to_visit; | |
3187 HeapEntry* root = snapshot_->root(); | |
3188 nodes_to_visit.Add(root); | |
3189 snapshot_->root()->paint(); | |
3190 while (!nodes_to_visit.is_empty()) { | |
3191 HeapEntry* entry = nodes_to_visit.last(); | |
3192 Vector<HeapGraphEdge*> children = entry->children(); | |
3193 bool has_new_edges = false; | |
3194 for (int i = 0; i < children.length(); ++i) { | |
3195 if (entry != root && !IsRetainingEdge(children[i])) continue; | |
3196 HeapEntry* child = children[i]->to(); | |
3197 if (!child->painted()) { | |
3198 nodes_to_visit.Add(child); | |
3199 child->paint(); | |
3200 has_new_edges = true; | |
3201 } | |
3202 } | |
3203 if (!has_new_edges) { | |
3204 entry->set_postorder_index(current_entry); | |
3205 (*entries)[current_entry++] = entry; | |
3206 nodes_to_visit.RemoveLast(); | |
3207 } | |
3208 } | |
3209 ASSERT_EQ(current_entry, entries->length()); | |
3210 } | |
3211 | |
3212 | |
3213 static int Intersect(int i1, int i2, const Vector<int>& dominators) { | |
3214 int finger1 = i1, finger2 = i2; | |
3215 while (finger1 != finger2) { | |
3216 while (finger1 < finger2) finger1 = dominators[finger1]; | |
3217 while (finger2 < finger1) finger2 = dominators[finger2]; | |
3218 } | |
3219 return finger1; | |
3220 } | |
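Note: Intersect climbs two postorder indices up the current dominator array until they meet; since the numbering is postorder, an idom always has a larger index than the nodes it dominates, so advancing the smaller finger makes progress toward the root. A worked example:

    // Worked example: root is node 3 (largest postorder index) and
    // dominators == {2, 2, 3, 3}, i.e. idom(0) == idom(1) == 2, idom(2) == 3.
    //   Intersect(0, 1): finger1: 0 -> dominators[0] == 2
    //                    finger2: 1 -> dominators[1] == 2
    //   The fingers meet at 2, the common dominator of nodes 0 and 1.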
3221 | |
3222 | |
3223 // The algorithm is based on the article: | |
3224 // K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm" | |
3225 // Softw. Pract. Exper. 4 (2001), pp. 1-10. | |
3226 bool HeapSnapshotGenerator::BuildDominatorTree( | |
3227 const Vector<HeapEntry*>& entries, | |
3228 Vector<int>* dominators) { | |
3229 if (entries.length() == 0) return true; | |
3230 HeapEntry* root = snapshot_->root(); | |
3231 const int entries_length = entries.length(), root_index = entries_length - 1; | |
3232 for (int i = 0; i < root_index; ++i) (*dominators)[i] = HeapEntry::kNoEntry; | |
3233 (*dominators)[root_index] = root_index; | |
3234 | |
3235 // The affected array is used to mark entries whose dominators | 
3236 // have to be recalculated because of changes in their retainers. | 
3237 ScopedVector<bool> affected(entries_length); | |
3238 for (int i = 0; i < affected.length(); ++i) affected[i] = false; | |
3239 // Mark the root direct children as affected. | |
3240 Vector<HeapGraphEdge*> children = entries[root_index]->children(); | |
3241 for (int i = 0; i < children.length(); ++i) { | |
3242 affected[children[i]->to()->postorder_index()] = true; | |
3243 } | |
3244 | |
3245 bool changed = true; | |
3246 while (changed) { | |
3247 changed = false; | |
3248 if (!ProgressReport(false)) return false; | |
3249 for (int i = root_index - 1; i >= 0; --i) { | |
3250 if (!affected[i]) continue; | |
3251 affected[i] = false; | |
3252 // If dominator of the entry has already been set to root, | |
3253 // then it can't propagate any further. | |
3254 if ((*dominators)[i] == root_index) continue; | |
3255 int new_idom_index = HeapEntry::kNoEntry; | |
3256 Vector<HeapGraphEdge*> rets = entries[i]->retainers(); | |
3257 for (int j = 0; j < rets.length(); ++j) { | |
3258 if (rets[j]->from() != root && !IsRetainingEdge(rets[j])) continue; | |
3259 int ret_index = rets[j]->from()->postorder_index(); | |
3260 if (dominators->at(ret_index) != HeapEntry::kNoEntry) { | |
3261 new_idom_index = new_idom_index == HeapEntry::kNoEntry | |
3262 ? ret_index | |
3263 : Intersect(ret_index, new_idom_index, *dominators); | |
3264 // If idom has already reached the root, it doesn't make sense | |
3265 // to check other retainers. | |
3266 if (new_idom_index == root_index) break; | |
3267 } | |
3268 } | |
3269 if (new_idom_index != HeapEntry::kNoEntry | |
3270 && dominators->at(i) != new_idom_index) { | |
3271 (*dominators)[i] = new_idom_index; | |
3272 changed = true; | |
3273 Vector<HeapGraphEdge*> children = entries[i]->children(); | |
3274 for (int j = 0; j < children.length(); ++j) { | |
3275 affected[children[j]->to()->postorder_index()] = true; | |
3276 } | |
3277 } | |
3278 } | |
3279 } | |
3280 return true; | |
3281 } | |
3282 | |
3283 | |
3284 bool HeapSnapshotGenerator::SetEntriesDominators() { | |
3285 MarkUserReachableObjects(); | |
3286 // This array is used for maintaining postorder of nodes. | |
3287 ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries().length()); | |
3288 FillPostorderIndexes(&ordered_entries); | |
3289 ScopedVector<int> dominators(ordered_entries.length()); | |
3290 if (!BuildDominatorTree(ordered_entries, &dominators)) return false; | |
3291 for (int i = 0; i < ordered_entries.length(); ++i) { | |
3292 ASSERT(dominators[i] != HeapEntry::kNoEntry); | |
3293 ordered_entries[i]->set_dominator(ordered_entries[dominators[i]]); | |
3294 } | |
3295 return true; | |
3296 } | |
3297 | |
3298 | |
3299 bool HeapSnapshotGenerator::CalculateRetainedSizes() { | |
3300 // In the dominator tree we only know parent nodes, not children, | 
3301 // so to sum up total sizes we "bubble" each node's self size up, | 
3302 // adding it to all of its parents. | 
3303 List<HeapEntry>& entries = snapshot_->entries(); | |
3304 for (int i = 0; i < entries.length(); ++i) { | |
3305 HeapEntry* entry = &entries[i]; | |
3306 entry->set_retained_size(entry->self_size()); | |
3307 } | |
3308 for (int i = 0; i < entries.length(); ++i) { | |
3309 int entry_size = entries[i].self_size(); | |
3310 HeapEntry* current = &entries[i]; | |
3311 for (HeapEntry* dominator = current->dominator(); | |
3312 dominator != current; | |
3313 current = dominator, dominator = current->dominator()) { | |
3314 ASSERT(current->dominator() != NULL); | |
3315 dominator->add_retained_size(entry_size); | |
3316 } | |
3317 } | |
3318 return true; | |
3319 } | |
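Note: The inner loop walks one entry's dominator chain and adds that entry's self size to every ancestor, so a node's retained size ends up as its own self size plus the self sizes of everything it dominates. A worked example:

    // Worked example: dominator chain c -> b -> a -> root, self sizes
    // a = 8, b = 16, c = 24. After both loops:
    //   retained(c) = 24
    //   retained(b) = 16 + 24     = 40
    //   retained(a) = 8 + 16 + 24 = 48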
3320 | |
3321 | |
3322 template<int bytes> struct MaxDecimalDigitsIn; | 3105 template<int bytes> struct MaxDecimalDigitsIn; |
3323 template<> struct MaxDecimalDigitsIn<4> { | 3106 template<> struct MaxDecimalDigitsIn<4> { |
3324 static const int kSigned = 11; | 3107 static const int kSigned = 11; |
3325 static const int kUnsigned = 10; | 3108 static const int kUnsigned = 10; |
3326 }; | 3109 }; |
3327 template<> struct MaxDecimalDigitsIn<8> { | 3110 template<> struct MaxDecimalDigitsIn<8> { |
3328 static const int kSigned = 20; | 3111 static const int kSigned = 20; |
3329 static const int kUnsigned = 20; | 3112 static const int kUnsigned = 20; |
3330 }; | 3113 }; |
3331 | 3114 |
(...skipping 78 matching lines...)
3410 v8::OutputStream* stream_; | 3193 v8::OutputStream* stream_; |
3411 int chunk_size_; | 3194 int chunk_size_; |
3412 ScopedVector<char> chunk_; | 3195 ScopedVector<char> chunk_; |
3413 int chunk_pos_; | 3196 int chunk_pos_; |
3414 bool aborted_; | 3197 bool aborted_; |
3415 }; | 3198 }; |
3416 | 3199 |
3417 | 3200 |
3418 // type, name|index, to_node. | 3201 // type, name|index, to_node. |
3419 const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3; | 3202 const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3; |
3420 // type, name, id, self_size, retained_size, dominator, children_index. | 3203 // type, name, id, self_size, children_index. |
3421 const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 7; | 3204 const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5; |
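Note: With retained_size and dominator no longer precomputed, each row of the serialized "nodes" array shrinks from seven numbers to five. An illustrative row under the old and new field lists (the values are made up):

    // old: type, name, id, self_size, retained_size, dominator, children_index
    //      e.g.  3,127,42,56,96,0,24
    // new: type, name, id, self_size, edges_index
    //      e.g.  3,127,42,56,24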
3422 | 3205 |
3423 void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) { | 3206 void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) { |
3424 ASSERT(writer_ == NULL); | 3207 ASSERT(writer_ == NULL); |
3425 writer_ = new OutputStreamWriter(stream); | 3208 writer_ = new OutputStreamWriter(stream); |
3426 | 3209 |
3427 HeapSnapshot* original_snapshot = NULL; | 3210 HeapSnapshot* original_snapshot = NULL; |
3428 if (snapshot_->RawSnapshotSize() >= | 3211 if (snapshot_->RawSnapshotSize() >= |
3429 SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) { | 3212 SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) { |
3430 // The snapshot is too big. Serialize a fake snapshot. | 3213 // The snapshot is too big. Serialize a fake snapshot. |
3431 original_snapshot = snapshot_; | 3214 original_snapshot = snapshot_; |
(...skipping 19 matching lines...)
3451 snapshot_->uid()); | 3234 snapshot_->uid()); |
3452 result->AddRootEntry(); | 3235 result->AddRootEntry(); |
3453 const char* text = snapshot_->collection()->names()->GetFormatted( | 3236 const char* text = snapshot_->collection()->names()->GetFormatted( |
3454 "The snapshot is too big. " | 3237 "The snapshot is too big. " |
3455 "Maximum snapshot size is %" V8_PTR_PREFIX "u MB. " | 3238 "Maximum snapshot size is %" V8_PTR_PREFIX "u MB. " |
3456 "Actual snapshot size is %" V8_PTR_PREFIX "u MB.", | 3239 "Actual snapshot size is %" V8_PTR_PREFIX "u MB.", |
3457 SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB, | 3240 SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB, |
3458 (snapshot_->RawSnapshotSize() + MB - 1) / MB); | 3241 (snapshot_->RawSnapshotSize() + MB - 1) / MB); |
3459 HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4); | 3242 HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4); |
3460 result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message); | 3243 result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message); |
3461 result->FillChildrenAndRetainers(); | 3244 result->FillChildren(); |
3462 result->SetDominatorsToSelf(); | |
3463 return result; | 3245 return result; |
3464 } | 3246 } |
3465 | 3247 |
3466 | 3248 |
3467 void HeapSnapshotJSONSerializer::SerializeImpl() { | 3249 void HeapSnapshotJSONSerializer::SerializeImpl() { |
3468 List<HeapEntry>& nodes = snapshot_->entries(); | 3250 List<HeapEntry>& nodes = snapshot_->entries(); |
3469 ASSERT(0 == snapshot_->root()->index()); | 3251 ASSERT(0 == snapshot_->root()->index()); |
3470 writer_->AddCharacter('{'); | 3252 writer_->AddCharacter('{'); |
3471 writer_->AddString("\"snapshot\":{"); | 3253 writer_->AddString("\"snapshot\":{"); |
3472 SerializeSnapshot(); | 3254 SerializeSnapshot(); |
(...skipping 77 matching lines...)
3550 SerializeEdge(children[j], first_edge); | 3332 SerializeEdge(children[j], first_edge); |
3551 first_edge = false; | 3333 first_edge = false; |
3552 if (writer_->aborted()) return; | 3334 if (writer_->aborted()) return; |
3553 } | 3335 } |
3554 } | 3336 } |
3555 } | 3337 } |
3556 | 3338 |
3557 | 3339 |
3558 void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry, | 3340 void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry, |
3559 int edges_index) { | 3341 int edges_index) { |
3560 // The buffer needs space for 6 ints, 1 uint32_t, 7 commas, \n and \0 | 3342 // The buffer needs space for 5 uint32_t, 5 commas, \n and \0 |
3561 static const int kBufferSize = | 3343 static const int kBufferSize = |
3562 6 * MaxDecimalDigitsIn<sizeof(int)>::kSigned // NOLINT | 3344 5 * MaxDecimalDigitsIn<sizeof(uint32_t)>::kUnsigned // NOLINT |
3563 + MaxDecimalDigitsIn<sizeof(uint32_t)>::kUnsigned // NOLINT | 3345 + 5 + 1 + 1; |
3564 + 7 + 1 + 1; | |
3565 EmbeddedVector<char, kBufferSize> buffer; | 3346 EmbeddedVector<char, kBufferSize> buffer; |
3566 int buffer_pos = 0; | 3347 int buffer_pos = 0; |
3567 if (entry_index(entry) != 0) { | 3348 if (entry_index(entry) != 0) { |
3568 buffer[buffer_pos++] = ','; | 3349 buffer[buffer_pos++] = ','; |
3569 } | 3350 } |
3570 buffer_pos = utoa(entry->type(), buffer, buffer_pos); | 3351 buffer_pos = utoa(entry->type(), buffer, buffer_pos); |
3571 buffer[buffer_pos++] = ','; | 3352 buffer[buffer_pos++] = ','; |
3572 buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos); | 3353 buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos); |
3573 buffer[buffer_pos++] = ','; | 3354 buffer[buffer_pos++] = ','; |
3574 buffer_pos = utoa(entry->id(), buffer, buffer_pos); | 3355 buffer_pos = utoa(entry->id(), buffer, buffer_pos); |
3575 buffer[buffer_pos++] = ','; | 3356 buffer[buffer_pos++] = ','; |
3576 buffer_pos = utoa(entry->self_size(), buffer, buffer_pos); | 3357 buffer_pos = utoa(entry->self_size(), buffer, buffer_pos); |
3577 buffer[buffer_pos++] = ','; | 3358 buffer[buffer_pos++] = ','; |
3578 buffer_pos = utoa(entry->retained_size(), buffer, buffer_pos); | |
3579 buffer[buffer_pos++] = ','; | |
3580 buffer_pos = utoa(entry_index(entry->dominator()), buffer, buffer_pos); | |
3581 buffer[buffer_pos++] = ','; | |
3582 buffer_pos = utoa(edges_index, buffer, buffer_pos); | 3359 buffer_pos = utoa(edges_index, buffer, buffer_pos); |
3583 buffer[buffer_pos++] = '\n'; | 3360 buffer[buffer_pos++] = '\n'; |
3584 buffer[buffer_pos++] = '\0'; | 3361 buffer[buffer_pos++] = '\0'; |
3585 writer_->AddString(buffer.start()); | 3362 writer_->AddString(buffer.start()); |
3586 } | 3363 } |
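Note: A quick check of the buffer arithmetic above: five uint32_t fields at up to ten decimal digits each, five commas (four separators plus the possible leading one), and the trailing '\n' and '\0' give 5 * 10 + 5 + 1 + 1 = 57 bytes, matching kBufferSize when sizeof(uint32_t) == 4.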
3587 | 3364 |
3588 | 3365 |
3589 void HeapSnapshotJSONSerializer::SerializeNodes(const List<HeapEntry>& nodes) { | 3366 void HeapSnapshotJSONSerializer::SerializeNodes(const List<HeapEntry>& nodes) { |
3590 int edges_index = 0; | 3367 int edges_index = 0; |
3591 for (int i = 0; i < nodes.length(); ++i) { | 3368 for (int i = 0; i < nodes.length(); ++i) { |
(...skipping 16 matching lines...)
3608 // We use a set of macros to improve readability. | 3385 // We use a set of macros to improve readability. |
3609 #define JSON_A(s) "[" s "]" | 3386 #define JSON_A(s) "[" s "]" |
3610 #define JSON_O(s) "{" s "}" | 3387 #define JSON_O(s) "{" s "}" |
3611 #define JSON_S(s) "\"" s "\"" | 3388 #define JSON_S(s) "\"" s "\"" |
3612 writer_->AddString(JSON_O( | 3389 writer_->AddString(JSON_O( |
3613 JSON_S("node_fields") ":" JSON_A( | 3390 JSON_S("node_fields") ":" JSON_A( |
3614 JSON_S("type") "," | 3391 JSON_S("type") "," |
3615 JSON_S("name") "," | 3392 JSON_S("name") "," |
3616 JSON_S("id") "," | 3393 JSON_S("id") "," |
3617 JSON_S("self_size") "," | 3394 JSON_S("self_size") "," |
3618 JSON_S("retained_size") "," | |
3619 JSON_S("dominator") "," | |
3620 JSON_S("edges_index")) "," | 3395 JSON_S("edges_index")) "," |
3621 JSON_S("node_types") ":" JSON_A( | 3396 JSON_S("node_types") ":" JSON_A( |
3622 JSON_A( | 3397 JSON_A( |
3623 JSON_S("hidden") "," | 3398 JSON_S("hidden") "," |
3624 JSON_S("array") "," | 3399 JSON_S("array") "," |
3625 JSON_S("string") "," | 3400 JSON_S("string") "," |
3626 JSON_S("object") "," | 3401 JSON_S("object") "," |
3627 JSON_S("code") "," | 3402 JSON_S("code") "," |
3628 JSON_S("closure") "," | 3403 JSON_S("closure") "," |
3629 JSON_S("regexp") "," | 3404 JSON_S("regexp") "," |
(...skipping 118 matching lines...)
3748 | 3523 |
3749 | 3524 |
3750 void HeapSnapshotJSONSerializer::SortHashMap( | 3525 void HeapSnapshotJSONSerializer::SortHashMap( |
3751 HashMap* map, List<HashMap::Entry*>* sorted_entries) { | 3526 HashMap* map, List<HashMap::Entry*>* sorted_entries) { |
3752 for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) | 3527 for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) |
3753 sorted_entries->Add(p); | 3528 sorted_entries->Add(p); |
3754 sorted_entries->Sort(SortUsingEntryValue); | 3529 sorted_entries->Sort(SortUsingEntryValue); |
3755 } | 3530 } |
3756 | 3531 |
3757 } } // namespace v8::internal | 3532 } } // namespace v8::internal |