OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 4917 matching lines...) | |
4928 | 4928 |
4929 | 4929 |
4930 void Map::RemoveFromCodeCache(String* name, Code* code, int index) { | 4930 void Map::RemoveFromCodeCache(String* name, Code* code, int index) { |
4931 // No GC is supposed to happen between a call to IndexInCodeCache and | 4931 // No GC is supposed to happen between a call to IndexInCodeCache and |
4932 // RemoveFromCodeCache so the code cache must be there. | 4932 // RemoveFromCodeCache so the code cache must be there. |
4933 ASSERT(!code_cache()->IsFixedArray()); | 4933 ASSERT(!code_cache()->IsFixedArray()); |
4934 CodeCache::cast(code_cache())->RemoveByIndex(name, code, index); | 4934 CodeCache::cast(code_cache())->RemoveByIndex(name, code, index); |
4935 } | 4935 } |
4936 | 4936 |
4937 | 4937 |
4938 // An iterator over all map transitions in a descriptor array, reusing the map | |
4939 // field of the contents array while it is running. | |
4940 class IterableDescriptorArray : public DescriptorArray { | |
4941 public: | |
4942 void Start() { | |
4943 ASSERT(!IsRunning()); | |
4944 if (HasContentArray()) *ContentHeader() = Smi::FromInt(0); | |
4945 } | |
4946 | |
4947 bool IsRunning() { | |
Vyacheslav Egorov (Chromium)
2012/01/20 12:04:54
I find the name confusing because descriptor itsel
Sven Panne
2012/01/20 13:15:33
If we really wanted to be clean, we should introdu
Vyacheslav Egorov (Chromium)
2012/01/20 13:59:50
IsIterating sounds fine.
| |
4948 return HasContentArray() && (*ContentHeader())->IsSmi(); | |
4949 } | |
4950 | |
4951 Map* Next() { | |
4952 ASSERT(IsRunning()); | |
4953 FixedArray* contents = ContentArray(); | |
4954 int index = Smi::cast(*ContentHeader())->value(); | |
4955 while (index < contents->length()) { | |
4956 int next_index = index + 2; | |
4957 PropertyDetails details(Smi::cast(contents->get(index + 1))); | |
4958 if (details.IsTransition()) { | |
4959 *ContentHeader() = Smi::FromInt(next_index); | |
4960 return static_cast<Map*>(contents->get(index)); | |
4961 } | |
4962 index = next_index; | |
4963 } | |
4964 *ContentHeader() = GetHeap()->fixed_array_map(); | |
4965 return NULL; | |
4966 } | |
4967 | |
4968 private: | |
4969 bool HasContentArray() { return length() > kContentArrayIndex; } | |
4970 | |
4971 FixedArray* ContentArray() { | |
4972 return static_cast<FixedArray*>(get(kContentArrayIndex)); | |
4973 } | |
4974 | |
4975 Object** ContentHeader() { | |
4976 return RawField(ContentArray(), kMapOffset); | |
4977 } | |
4978 }; | |
4979 | |
4980 | |
4981 // An iterator over all prototype transitions, reusing the map field of the | |
4982 // underlying array while it is running. | |
4983 class IterablePrototypeTransitions : public FixedArray { | |
Vyacheslav Egorov (Chromium)
2012/01/20 12:04:54
I think parts IterableDescriptorArray & IterablePr
Sven Panne
2012/01/20 13:15:33
There are several reasons why I would prefer to av
Vyacheslav Egorov (Chromium)
2012/01/20 13:59:50
* Yes, it is a pure accident that both are fixed a
Sven Panne
2012/01/20 14:54:50
What code would actually be shared? The structure
Vyacheslav Egorov (Chromium)
2012/01/20 16:30:10
I outlined what I think can be shared in my intrus
| |
4984 public: | |
4985 void Start() { | |
4986 ASSERT(!IsRunning()); | |
4987 if (HasTransitions()) *Header() = Smi::FromInt(0); | |
4988 } | |
4989 | |
4990 bool IsRunning() { | |
4991 return HasTransitions() && (*Header())->IsSmi(); | |
4992 } | |
4993 | |
4994 Map* Next() { | |
4995 ASSERT(IsRunning()); | |
4996 int index = Smi::cast(*Header())->value(); | |
4997 if (index < NumberOfTransitions()) { | |
4998 *Header() = Smi::FromInt(index + 1); | |
4999 return GetTransition(index); | |
5000 } | |
5001 *Header() = GetHeap()->fixed_array_map(); | |
5002 return NULL; | |
5003 } | |
5004 | |
5005 private: | |
5006 bool HasTransitions() { return length() >= Map::kProtoTransitionHeaderSize; } | |
5007 | |
5008 Object** Header() { | |
5009 return RawField(this, kMapOffset); | |
5010 } | |
5011 | |
5012 int NumberOfTransitions() { | |
5013 return Smi::cast(get(Map::kProtoTransitionNumberOfEntriesOffset))->value(); | |
5014 } | |
5015 | |
5016 Map* GetTransition(int transitionNumber) { | |
5017 return Map::cast( | |
5018 get(Map::kProtoTransitionHeaderSize + | |
5019 Map::kProtoTransitionMapOffset + | |
5020 transitionNumber * Map::kProtoTransitionElementsPerEntry)); | |
5021 } | |
5022 }; | |
5023 | |
5024 | |
5025 // To traverse the transition tree iteratively, we have to store two kinds of | |
5026 // information in a map: the parent map in the traversal and which children of a | |
5027 // node have already been visited. To do this without additional memory, we | |
5028 // temporarily reuse two map fields with known values: | |
5029 // | |
5030 // (1) The map of the map temporarily holds the parent, and is restored to the | |
5031 // meta map afterwards. | |
5032 // | |
5033 // (2) The info about which children have already been visited depends on | |
5034 // which part of the map we are currently iterating over: | |
5035 // | |
5036 // (a) If we currently follow normal map transitions, we temporarily store | |
5037 // the current index in the map of the FixedArray of the descriptor | |
5038 // array's contents, and restore it to the fixed array map afterwards. | |
5039 // Note that a single descriptor can have 0, 1, or 2 transitions. | |
5040 // | |
5041 // (b) If we currently follow prototype transitions, we temporarily store | |
5042 // the current index in the map of the FixedArray holding the prototype | |
5043 // transitions, and restore it to the fixed array map afterwards. | |
5044 // | |
5045 // Note that the child iterator is just a concatenation of two iterators: one | |
5046 // iterating over map transitions and one iterating over prototype transitions. | |
5047 class IterableMap : public Map { | |
5048 public: | |
5049 // Record the parent in the traversal within this map. Note that this destroys | |
5050 // this map's map! | |
5051 void setParent(IterableMap* parent) { set_map_no_write_barrier(parent); } | |
Vyacheslav Egorov (Chromium)
2012/01/20 16:30:10
naming convention: first letter capital
| |
5052 | |
5053 // Reset the current map's map, returning the parent previously stored in it. | |
5054 IterableMap* getAndResetParent() { | |
Vyacheslav Egorov (Chromium)
2012/01/20 16:30:10
naming convention: first letter capital
| |
5055 IterableMap* old_parent = static_cast<IterableMap*>(map()); | |
5056 set_map_no_write_barrier(GetHeap()->meta_map()); | |
5057 return old_parent; | |
5058 } | |
5059 | |
5060 // Start iterating over this map's children, possibly destroying a FixedArray | |
5061 // map (see explanation above). | |
5062 void childIteratorStart() { | |
Vyacheslav Egorov (Chromium)
2012/01/20 16:30:10
naming convention: first letter capital
| |
5063 iterable_descriptor_array()->Start(); | |
5064 iterable_prototype_transitions()->Start(); | |
5065 } | |
5066 | |
5067 // If we have an unvisited child map, return that one and advance. If we have | |
5068 // none, return NULL and reset any destroyed FixedArray maps. | |
5069 IterableMap* childIteratorNext() { | |
Vyacheslav Egorov (Chromium)
2012/01/20 16:30:10
naming convention: first letter capital
| |
5070 if (iterable_descriptor_array()->IsRunning()) { | |
5071 Map* next = iterable_descriptor_array()->Next(); | |
5072 if (next != NULL) return static_cast<IterableMap*>(next); | |
5073 } | |
5074 if (iterable_prototype_transitions()->IsRunning()) { | |
5075 Map* next = iterable_prototype_transitions()->Next(); | |
5076 if (next != NULL) return static_cast<IterableMap*>(next); | |
5077 } | |
5078 return NULL; | |
5079 } | |
5080 | |
5081 private: | |
5082 IterableDescriptorArray* iterable_descriptor_array() { | |
5083 return static_cast<IterableDescriptorArray*>(instance_descriptors()); | |
5084 } | |
5085 | |
5086 IterablePrototypeTransitions* iterable_prototype_transitions() { | |
5087 return static_cast<IterablePrototypeTransitions*>( | |
5088 unchecked_prototype_transitions()); | |
5089 } | |
5090 }; | |
5091 | |
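The comment block and the IterableMap protocol above are easier to picture on a toy example. The following is a minimal, self-contained C++14 sketch of the same idea and is not part of the patch: the Node type and every name in it are invented for illustration. Two fields whose at-rest value is known (nullptr and -1, standing in for the meta map and the fixed-array map) are borrowed to hold the parent link and the child cursor while the walk runs, and are restored before the node is handed to the visitor.

#include <cstdio>
#include <vector>

struct Node {
  const char* name;
  std::vector<Node*> children;
  Node* parent = nullptr;  // at rest always nullptr (plays the role of the meta map)
  int cursor = -1;         // at rest always -1 (plays the role of the fixed-array
                           // map stored in the content array's map slot)
};

// Postorder traversal without recursion and without an explicit stack; the
// visitor only ever sees nodes whose borrowed fields have been restored.
void TraversePostorder(Node* root, void (*visit)(Node*)) {
  Node* current = root;
  current->cursor = 0;                                     // childIteratorStart()
  while (true) {
    if (current->cursor < static_cast<int>(current->children.size())) {
      Node* child = current->children[current->cursor++];  // childIteratorNext()
      child->cursor = 0;                                    // child->childIteratorStart()
      child->parent = current;                              // child->setParent(current)
      current = child;
    } else {
      Node* parent = current->parent;                       // getAndResetParent()
      current->parent = nullptr;
      current->cursor = -1;
      visit(current);                                       // node is clean again here
      if (current == root) break;
      current = parent;
    }
  }
}

int main() {
  Node d{"d"}, c{"c"}, b{"b", {&c, &d}}, a{"a", {&b}};
  TraversePostorder(&a, [](Node* n) { std::printf("%s\n", n->name); });
}

This prints c, d, b, a: children strictly before parents, and every node is back in its at-rest state by the time it is visited, which mirrors getAndResetParent() running before the callback in TraverseTransitionTree() below.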
5092 | |
5093 // Traverse the transition tree in postorder without using the C++ stack. We can | |
5094 // deduce the algorithm below using the following steps: | |
5095 // | |
5096 // (1) Implement the recursive textbook algorithm for visiting a tree. | |
Vyacheslav Egorov (Chromium)
2012/01/20 12:04:54
I don't think these 4 steps make algorithm any cle
Sven Panne
2012/01/20 13:15:33
I can easily live without the comment, but this wo
Vyacheslav Egorov (Chromium)
2012/01/20 13:59:50
I agree that this comment is absolutely correct, a
Sven Panne
2012/01/20 14:54:50
Understanding the derivation of a non-trivial algo
| |
5097 // | |
5098 // (2) Transform that into an iterative version using an explicit stack, where | |
5099 // each stack entry contains a pair of values: the tree node we are | |
5100 // currently visiting and info about which of its children have already | |
5101 // been visited. | |
5102 // | |
5103 // (3) Use local variables for the top-of-stack pair. | |
5104 // | |
5105 // (4) Put the stack entry pair directly into the nodes, correctly resetting | |
5106 // any overwritten parts after the visit. | |
4938 void Map::TraverseTransitionTree(TraverseCallback callback, void* data) { | 5107 void Map::TraverseTransitionTree(TraverseCallback callback, void* data) { |
4939 // Traverse the transition tree without using a stack. We do this by | 5108 IterableMap* current = static_cast<IterableMap*>(this); |
4940 // reversing the pointers in the maps and descriptor arrays. | 5109 current->childIteratorStart(); |
4941 Map* current = this; | 5110 while (true) { |
Vyacheslav Egorov (Chromium)
2012/01/20 12:04:54
can it be changed into do { } while (current != me
Sven Panne
2012/01/20 13:15:33
I would prefer to leave it as it is, because using
Vyacheslav Egorov (Chromium)
2012/01/20 13:59:50
It would streamline control-flow (I would read thi
Sven Panne
2012/01/20 14:54:50
HasNext would complicate the currently easy iterat
Vyacheslav Egorov (Chromium)
2012/01/20 16:30:10
I consider loops with no special exits easier to r
| |
4942 Map* meta_map = GetHeap()->meta_map(); | 5111 IterableMap* child = current->childIteratorNext(); |
Vyacheslav Egorov (Chromium)
2012/01/20 12:04:54
I think it might be more clear to have descendInto
Sven Panne
2012/01/20 13:15:33
I think merging those calls together will make thi
Vyacheslav Egorov (Chromium)
2012/01/20 13:59:50
I have to agree that merging things together might
| |
4943 Object** map_or_index_field = NULL; | 5112 if (child != NULL) { |
4944 while (current != meta_map) { | 5113 child->childIteratorStart(); |
4945 DescriptorArray* d = reinterpret_cast<DescriptorArray*>( | 5114 child->setParent(current); |
4946 *RawField(current, Map::kInstanceDescriptorsOrBitField3Offset)); | 5115 current = child; |
4947 if (!d->IsEmpty()) { | |
4948 FixedArray* contents = reinterpret_cast<FixedArray*>( | |
4949 d->get(DescriptorArray::kContentArrayIndex)); | |
4950 map_or_index_field = RawField(contents, HeapObject::kMapOffset); | |
4951 Object* map_or_index = *map_or_index_field; | |
4952 bool map_done = true; // Controls a nested continue statement. | |
4953 for (int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : 0; | |
4954 i < contents->length(); | |
4955 i += 2) { | |
4956 PropertyDetails details(Smi::cast(contents->get(i + 1))); | |
4957 if (details.IsTransition()) { | |
4958 // Found a map in the transition array. We record our progress in | |
4959 // the transition array by recording the current map in the map field | |
4960 // of the next map and recording the index in the transition array in | |
4961 // the map field of the array. | |
4962 Map* next = Map::cast(contents->get(i)); | |
4963 next->set_map_no_write_barrier(current); | |
4964 *map_or_index_field = Smi::FromInt(i + 2); | |
4965 current = next; | |
4966 map_done = false; | |
4967 break; | |
4968 } | |
4969 } | |
4970 if (!map_done) continue; | |
4971 } else { | 5116 } else { |
4972 map_or_index_field = NULL; | 5117 IterableMap* parent = current->getAndResetParent(); |
5118 callback(current, data); | |
5119 if (current == this) break; | |
5120 current = parent; | |
4973 } | 5121 } |
4974 // That was the regular transitions, now for the prototype transitions. | |
4975 FixedArray* prototype_transitions = | |
4976 current->unchecked_prototype_transitions(); | |
4977 Object** proto_map_or_index_field = | |
4978 RawField(prototype_transitions, HeapObject::kMapOffset); | |
4979 Object* map_or_index = *proto_map_or_index_field; | |
4980 const int start = kProtoTransitionHeaderSize + kProtoTransitionMapOffset; | |
4981 int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : start; | |
4982 if (i < prototype_transitions->length()) { | |
4983 // Found a map in the prototype transition array. Record progress in | |
4984 // an analogous way to the regular transitions array above. | |
4985 Object* perhaps_map = prototype_transitions->get(i); | |
4986 if (perhaps_map->IsMap()) { | |
4987 Map* next = Map::cast(perhaps_map); | |
4988 next->set_map_no_write_barrier(current); | |
4989 *proto_map_or_index_field = | |
4990 Smi::FromInt(i + kProtoTransitionElementsPerEntry); | |
4991 current = next; | |
4992 continue; | |
4993 } | |
4994 } | |
4995 *proto_map_or_index_field = GetHeap()->fixed_array_map(); | |
4996 if (map_or_index_field != NULL) { | |
4997 *map_or_index_field = GetHeap()->fixed_array_map(); | |
4998 } | |
4999 | |
5000 // The callback expects a map to have a real map as its map, so we save | |
5001 // the map field, which is being used to track the traversal and put the | |
5002 // correct map (the meta_map) in place while we do the callback. | |
5003 Map* prev = current->map(); | |
5004 current->set_map_no_write_barrier(meta_map); | |
5005 callback(current, data); | |
5006 current = prev; | |
5007 } | 5122 } |
5008 } | 5123 } |
5009 | 5124 |
5010 | 5125 |
5011 MaybeObject* CodeCache::Update(String* name, Code* code) { | 5126 MaybeObject* CodeCache::Update(String* name, Code* code) { |
5012 // The number of monomorphic stubs for normal load/store/call IC's can grow to | 5127 // The number of monomorphic stubs for normal load/store/call IC's can grow to |
5013 // a large number and therefore they need to go into a hash table. They are | 5128 // a large number and therefore they need to go into a hash table. They are |
5014 // used to load global properties from cells. | 5129 // used to load global properties from cells. |
5015 if (code->type() == NORMAL) { | 5130 if (code->type() == NORMAL) { |
5016 // Make sure that a hash table is allocated for the normal load code cache. | 5131 // Make sure that a hash table is allocated for the normal load code cache. |
(...skipping 7792 matching lines...) | |
12809 if (break_point_objects()->IsUndefined()) return 0; | 12924 if (break_point_objects()->IsUndefined()) return 0; |
12810 // Single break point. | 12925 // Single break point. |
12811 if (!break_point_objects()->IsFixedArray()) return 1; | 12926 if (!break_point_objects()->IsFixedArray()) return 1; |
12812 // Multiple break points. | 12927 // Multiple break points. |
12813 return FixedArray::cast(break_point_objects())->length(); | 12928 return FixedArray::cast(break_point_objects())->length(); |
12814 } | 12929 } |
12815 #endif // ENABLE_DEBUGGER_SUPPORT | 12930 #endif // ENABLE_DEBUGGER_SUPPORT |
12816 | 12931 |
12817 | 12932 |
12818 } } // namespace v8::internal | 12933 } } // namespace v8::internal |