OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 2088 matching lines...) | |
2099 TransitionArray* transition_array = transitions(); | 2099 TransitionArray* transition_array = transitions(); |
2100 int number = transition_array->Search(name); | 2100 int number = transition_array->Search(name); |
2101 if (number != TransitionArray::kNotFound) { | 2101 if (number != TransitionArray::kNotFound) { |
2102 return result->TransitionResult(holder, number); | 2102 return result->TransitionResult(holder, number); |
2103 } | 2103 } |
2104 } | 2104 } |
2105 result->NotFound(); | 2105 result->NotFound(); |
2106 } | 2106 } |
2107 | 2107 |
2108 | 2108 |
2109 enum RightTrimPhase { FROM_GC, FROM_MUTATOR }; | |
Michael Starzinger
2012/07/25 11:09:06
Let's call that RightTrimMode or something like that.
| |
2110 | |
2111 | |
2112 static void ZapEndOfFixedArray(Address new_end, int to_trim) { | |
2113 // If we are doing a big trim in old space then we zap the space. | |
2114 Object** zap = reinterpret_cast<Object**>(new_end); | |
2115 for (int i = 1; i < to_trim; i++) { | |
2116 *zap++ = Smi::FromInt(0); | |
2117 } | |
2118 } | |
2119 | |
2120 template<RightTrimPhase trim_phase> | |
Michael Starzinger
2012/07/25 11:09:06
Let's call that trim_mode.
| |
2121 static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) { | |
2122 ASSERT(elms->map() != HEAP->fixed_cow_array_map()); | |
2123 // For now this trick is only applied to fixed arrays in new and paged space. | |
Vyacheslav Egorov (Google)
2012/07/25 12:04:22
this comment makes no sense for trimming from the
| |
2124 // In large object space the object's start must coincide with the chunk | |
2125 // and thus the trick is just not applicable. | |
2126 ASSERT(!HEAP->lo_space()->Contains(elms)); | |
2127 | |
2128 const int len = elms->length(); | |
2129 | |
2130 ASSERT(to_trim < len); | |
2131 | |
2132 Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim); | |
2133 | |
2134 if (trim_phase == FROM_GC) { | |
2135 #ifdef DEBUG | |
2136 ZapEndOfFixedArray(new_end, to_trim); | |
2137 #endif | |
2138 } else { | |
2139 ZapEndOfFixedArray(new_end, to_trim); | |
2140 } | |
2141 | |
2142 int size_delta = to_trim * kPointerSize; | |
2143 | |
2144 // Technically in new space this write might be omitted (except for | |
2145 // debug mode, which iterates through the heap), but to play it safe | |
2146 // we still do it. | |
2147 heap->CreateFillerObjectAt(new_end, size_delta); | |
2148 | |
2149 elms->set_length(len - to_trim); | |
2150 | |
2151 // Maintain marking consistency for IncrementalMarking. | |
2152 if (Marking::IsBlack(Marking::MarkBitFrom(elms))) { | |
2153 if (trim_phase == FROM_GC) { | |
2154 MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta); | |
2155 } else { | |
2156 MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta); | |
2157 } | |
2158 } | |
2159 } | |
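
Note: the template parameter above selects the live-bytes accounting path (IncrementLiveBytesFromGC vs. IncrementLiveBytesFromMutator) at compile time, so the distinction costs nothing at runtime; it also confines zapping of the trimmed tail to debug builds on the GC path while keeping it unconditional on the mutator path. A minimal standalone sketch of this dispatch pattern, not part of the patch (the accounting functions here are stand-ins, not the V8 ones):

#include <cstdio>

enum TrimMode { FROM_GC, FROM_MUTATOR };

// Stand-ins for MemoryChunk::IncrementLiveBytesFromGC/FromMutator.
static void IncrementLiveBytesFromGC(int delta) {
  std::printf("GC accounting: %d\n", delta);
}
static void IncrementLiveBytesFromMutator(int delta) {
  std::printf("mutator accounting: %d\n", delta);
}

template<TrimMode mode>
static void AdjustLiveBytes(int size_delta) {
  // The condition is a compile-time constant, so each instantiation
  // keeps exactly one of the two calls.
  if (mode == FROM_GC) {
    IncrementLiveBytesFromGC(-size_delta);
  } else {
    IncrementLiveBytesFromMutator(-size_delta);
  }
}

int main() {
  AdjustLiveBytes<FROM_GC>(16);       // instantiated for the GC path
  AdjustLiveBytes<FROM_MUTATOR>(16);  // instantiated for the mutator path
  return 0;
}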
2160 | |
2161 | |
2162 void Map::CopyAppendCallbackDescriptors(Handle<Map> map, | |
2163 Handle<Object> descriptors) { | |
2164 Isolate* isolate = map->GetIsolate(); | |
2165 Handle<DescriptorArray> array(map->instance_descriptors()); | |
2166 v8::NeanderArray callbacks(descriptors); | |
2167 int nof_callbacks = callbacks.length(); | |
2168 int descriptor_count = array->number_of_descriptors(); | |
2169 | |
2170 // Ensure the keys are symbols before writing them into the instance | |
2171 // descriptor. Since it may cause a GC, it has to be done before we | |
2172 // temporarily put the heap in an invalid state while appending descriptors. | |
2173 for (int i = 0; i < nof_callbacks; ++i) { | |
2174 Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks.get(i))); | |
2175 Handle<String> key = | |
2176 isolate->factory()->SymbolFromString( | |
2177 Handle<String>(String::cast(entry->name()))); | |
2178 entry->set_name(*key); | |
2179 } | |
2180 | |
2181 Handle<DescriptorArray> result = | |
2182 isolate->factory()->NewDescriptorArray(descriptor_count + nof_callbacks); | |
2183 | |
2184 // Ensure that marking will not progress and change color of objects. | |
2185 DescriptorArray::WhitenessWitness witness(*result); | |
2186 | |
2187 // Copy the descriptors from the array. | |
2188 if (0 < descriptor_count) { | |
2189 for (int i = 0; i < descriptor_count; i++) { | |
2190 result->CopyFrom(i, *array, i, witness); | |
2191 } | |
2192 } | |
2193 | |
2194 // After this point the GC is not allowed to run anymore until the map is in a | |
2195 // consistent state again, i.e., all the descriptors are appended and the | |
2196 // descriptor array is trimmed to the right size. | |
2197 map->set_instance_descriptors(*result); | |
2198 | |
2199 // Fill in new callback descriptors. Process the callbacks from | |
2200 // back to front so that the last callback with a given name takes | |
2201 // precedence over previously added callbacks with that name. | |
2202 for (int i = nof_callbacks - 1; i >= 0; i--) { | |
2203 AccessorInfo* entry = AccessorInfo::cast(callbacks.get(i)); | |
2204 String* key = String::cast(entry->name()); | |
2205 // Check if a descriptor with this name already exists before writing. | |
2206 if (LinearSearch(*result, key, map->NumberOfSetDescriptors()) == | |
2207 DescriptorArray::kNotFound) { | |
2208 CallbacksDescriptor desc(key, entry, entry->property_attributes()); | |
2209 map->AppendDescriptor(&desc, witness); | |
2210 } | |
2211 } | |
2212 | |
2213 int new_number_of_descriptors = map->NumberOfSetDescriptors(); | |
2214 // Reinstall the original descriptor array if no new elements were added. | |
2215 if (new_number_of_descriptors == descriptor_count) { | |
2216 map->set_instance_descriptors(*array); | |
2217 return; | |
2218 } | |
2219 | |
2220 // If duplicates were detected, trim the descriptor array to the right size. | |
2221 int new_array_size = DescriptorArray::SizeFor(new_number_of_descriptors); | |
Michael Starzinger
2012/07/25 11:09:06
Nice catch.
| |
2222 if (new_array_size < result->length()) { | |
2223 RightTrimFixedArray<FROM_MUTATOR>( | |
2224 isolate->heap(), *result, result->length() - new_array_size); | |
2225 } | |
2226 } | |
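
Note: the callback loop above gets "last registration wins" semantics purely from iteration order: processing the callbacks back to front and skipping any name already present means a later callback shadows earlier ones with the same name. A standalone sketch of that idea, not V8 code, with plain C strings standing in for V8 String keys and the LinearSearch helper:

#include <cstdio>
#include <cstring>

int main() {
  const char* callbacks[] = { "a", "b", "a", "c", "b" };
  const int n = sizeof(callbacks) / sizeof(callbacks[0]);
  const char* kept[n];
  int kept_count = 0;
  // Back to front: the last callback added under a name is seen first,
  // so earlier callbacks with the same name are skipped as duplicates.
  for (int i = n - 1; i >= 0; i--) {
    bool seen = false;
    for (int j = 0; j < kept_count; j++) {
      if (std::strcmp(kept[j], callbacks[i]) == 0) seen = true;
    }
    if (!seen) kept[kept_count++] = callbacks[i];
  }
  for (int j = 0; j < kept_count; j++) {
    std::printf("%s\n", kept[j]);  // prints: b c a
  }
  return 0;
}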
2227 | |
2228 | |
2109 static bool ContainsMap(MapHandleList* maps, Handle<Map> map) { | 2229 static bool ContainsMap(MapHandleList* maps, Handle<Map> map) { |
2110 ASSERT(!map.is_null()); | 2230 ASSERT(!map.is_null()); |
2111 for (int i = 0; i < maps->length(); ++i) { | 2231 for (int i = 0; i < maps->length(); ++i) { |
2112 if (!maps->at(i).is_null() && maps->at(i).is_identical_to(map)) return true; | 2232 if (!maps->at(i).is_null() && maps->at(i).is_identical_to(map)) return true; |
2113 } | 2233 } |
2114 return false; | 2234 return false; |
2115 } | 2235 } |
2116 | 2236 |
2117 | 2237 |
2118 template <class T> | 2238 template <class T> |
(...skipping 3597 matching lines...) | |
5716 | 5836 |
5717 MaybeObject* DescriptorArray::Allocate(int number_of_descriptors, | 5837 MaybeObject* DescriptorArray::Allocate(int number_of_descriptors, |
5718 SharedMode shared_mode) { | 5838 SharedMode shared_mode) { |
5719 Heap* heap = Isolate::Current()->heap(); | 5839 Heap* heap = Isolate::Current()->heap(); |
5720 // Do not use DescriptorArray::cast on incomplete object. | 5840 // Do not use DescriptorArray::cast on incomplete object. |
5721 FixedArray* result; | 5841 FixedArray* result; |
5722 if (number_of_descriptors == 0 && shared_mode == MAY_BE_SHARED) { | 5842 if (number_of_descriptors == 0 && shared_mode == MAY_BE_SHARED) { |
5723 return heap->empty_descriptor_array(); | 5843 return heap->empty_descriptor_array(); |
5724 } | 5844 } |
5725 // Allocate the array of keys. | 5845 // Allocate the array of keys. |
5726 { MaybeObject* maybe_array = | 5846 MaybeObject* maybe_array = |
5727 heap->AllocateFixedArray(ToKeyIndex(number_of_descriptors)); | 5847 heap->AllocateFixedArray(SizeFor(number_of_descriptors)); |
5728 if (!maybe_array->To(&result)) return maybe_array; | 5848 if (!maybe_array->To(&result)) return maybe_array; |
5729 } | |
5730 | 5849 |
5731 result->set(kEnumCacheIndex, Smi::FromInt(0)); | 5850 result->set(kEnumCacheIndex, Smi::FromInt(0)); |
5732 result->set(kTransitionsIndex, Smi::FromInt(0)); | 5851 result->set(kTransitionsIndex, Smi::FromInt(0)); |
5733 return result; | 5852 return result; |
5734 } | 5853 } |
5735 | 5854 |
5736 | 5855 |
5737 void DescriptorArray::SetEnumCache(FixedArray* bridge_storage, | 5856 void DescriptorArray::SetEnumCache(FixedArray* bridge_storage, |
5738 FixedArray* new_cache, | 5857 FixedArray* new_cache, |
5739 Object* new_index_cache) { | 5858 Object* new_index_cache) { |
(...skipping 1336 matching lines...) | |
7076 | 7195 |
7077 | 7196 |
7078 void String::PrintOn(FILE* file) { | 7197 void String::PrintOn(FILE* file) { |
7079 int length = this->length(); | 7198 int length = this->length(); |
7080 for (int i = 0; i < length; i++) { | 7199 for (int i = 0; i < length; i++) { |
7081 fprintf(file, "%c", Get(i)); | 7200 fprintf(file, "%c", Get(i)); |
7082 } | 7201 } |
7083 } | 7202 } |
7084 | 7203 |
7085 | 7204 |
7086 // This function should only be called from within the GC, since it uses | |
7087 // IncrementLiveBytesFromGC. If called from anywhere else, this results in an | |
7088 // inconsistent live-bytes count. | |
7089 static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) { | |
7090 ASSERT(elms->map() != HEAP->fixed_cow_array_map()); | |
7091 // For now this trick is only applied to fixed arrays in new and paged space. | |
7092 // In large object space the object's start must coincide with the chunk | |
7093 // and thus the trick is just not applicable. | |
7094 ASSERT(!HEAP->lo_space()->Contains(elms)); | |
7095 | |
7096 const int len = elms->length(); | |
7097 | |
7098 ASSERT(to_trim < len); | |
7099 | |
7100 Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim); | |
7101 | |
7102 #ifdef DEBUG | |
7103 // If we are doing a big trim in old space then we zap the space. | |
7104 Object** zap = reinterpret_cast<Object**>(new_end); | |
7105 for (int i = 1; i < to_trim; i++) { | |
7106 *zap++ = Smi::FromInt(0); | |
7107 } | |
7108 #endif | |
7109 | |
7110 int size_delta = to_trim * kPointerSize; | |
7111 | |
7112 // Technically in new space this write might be omitted (except for | |
7113 // debug mode, which iterates through the heap), but to play it safe | |
7114 // we still do it. | |
7115 heap->CreateFillerObjectAt(new_end, size_delta); | |
7116 | |
7117 elms->set_length(len - to_trim); | |
7118 | |
7119 // Maintain marking consistency for IncrementalMarking. | |
7120 if (Marking::IsBlack(Marking::MarkBitFrom(elms))) { | |
7121 MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta); | |
7122 } | |
7123 } | |
7124 | |
7125 | |
7126 // Clear a possible back pointer in case the transition leads to a dead map. | 7205 // Clear a possible back pointer in case the transition leads to a dead map. |
7127 // Return true in case a back pointer has been cleared and false otherwise. | 7206 // Return true in case a back pointer has been cleared and false otherwise. |
7128 static bool ClearBackPointer(Heap* heap, Object* target) { | 7207 static bool ClearBackPointer(Heap* heap, Object* target) { |
7129 ASSERT(target->IsMap()); | 7208 ASSERT(target->IsMap()); |
7130 Map* map = Map::cast(target); | 7209 Map* map = Map::cast(target); |
7131 if (Marking::MarkBitFrom(map).Get()) return false; | 7210 if (Marking::MarkBitFrom(map).Get()) return false; |
7132 map->SetBackPointer(heap->undefined_value(), SKIP_WRITE_BARRIER); | 7211 map->SetBackPointer(heap->undefined_value(), SKIP_WRITE_BARRIER); |
7133 return true; | 7212 return true; |
7134 } | 7213 } |
7135 | 7214 |
(...skipping 42 matching lines...) | |
7178 // If the final transition array does not contain any live transitions, remove | 7257 // If the final transition array does not contain any live transitions, remove |
7179 // the transition array from the map. | 7258 // the transition array from the map. |
7180 if (transition_index == 0 && | 7259 if (transition_index == 0 && |
7181 !t->HasElementsTransition() && | 7260 !t->HasElementsTransition() && |
7182 !t->HasPrototypeTransitions()) { | 7261 !t->HasPrototypeTransitions()) { |
7183 return ClearTransitions(heap); | 7262 return ClearTransitions(heap); |
7184 } | 7263 } |
7185 | 7264 |
7186 int trim = t->number_of_transitions() - transition_index; | 7265 int trim = t->number_of_transitions() - transition_index; |
7187 if (trim > 0) { | 7266 if (trim > 0) { |
7188 RightTrimFixedArray(heap, t, trim * TransitionArray::kTransitionSize); | 7267 RightTrimFixedArray<FROM_GC>( |
7268 heap, t, trim * TransitionArray::kTransitionSize); | |
7189 } | 7269 } |
7190 } | 7270 } |
7191 | 7271 |
7192 | 7272 |
7193 int Map::Hash() { | 7273 int Map::Hash() { |
7194 // For performance reasons we only hash the 3 most variable fields of a map: | 7274 // For performance reasons we only hash the 3 most variable fields of a map: |
7195 // constructor, prototype and bit_field2. | 7275 // constructor, prototype and bit_field2. |
7196 | 7276 |
7197 // Shift away the tag. | 7277 // Shift away the tag. |
7198 int hash = (static_cast<uint32_t>( | 7278 int hash = (static_cast<uint32_t>( |
(...skipping 5855 matching lines...) | |
13054 set_year(Smi::FromInt(year), SKIP_WRITE_BARRIER); | 13134 set_year(Smi::FromInt(year), SKIP_WRITE_BARRIER); |
13055 set_month(Smi::FromInt(month), SKIP_WRITE_BARRIER); | 13135 set_month(Smi::FromInt(month), SKIP_WRITE_BARRIER); |
13056 set_day(Smi::FromInt(day), SKIP_WRITE_BARRIER); | 13136 set_day(Smi::FromInt(day), SKIP_WRITE_BARRIER); |
13057 set_weekday(Smi::FromInt(weekday), SKIP_WRITE_BARRIER); | 13137 set_weekday(Smi::FromInt(weekday), SKIP_WRITE_BARRIER); |
13058 set_hour(Smi::FromInt(hour), SKIP_WRITE_BARRIER); | 13138 set_hour(Smi::FromInt(hour), SKIP_WRITE_BARRIER); |
13059 set_min(Smi::FromInt(min), SKIP_WRITE_BARRIER); | 13139 set_min(Smi::FromInt(min), SKIP_WRITE_BARRIER); |
13060 set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER); | 13140 set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER); |
13061 } | 13141 } |
13062 | 13142 |
13063 } } // namespace v8::internal | 13143 } } // namespace v8::internal |