| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 51 matching lines...) |
| 62 | 62 |
| 63 friend class StubCache; | 63 friend class StubCache; |
| 64 }; | 64 }; |
| 65 | 65 |
| 66 | 66 |
| 67 class StubCache { | 67 class StubCache { |
| 68 public: | 68 public: |
| 69 struct Entry { | 69 struct Entry { |
| 70 String* key; | 70 String* key; |
| 71 Code* value; | 71 Code* value; |
| 72 Map* map; |
| 72 }; | 73 }; |
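Note on the new `map` field: each cache entry now records the receiver map alongside the name and code, so a probe only hits when all three agree. A minimal C++-level sketch of the match condition (the helper name and the flags masking are assumptions for illustration, not part of this change):

    // Hypothetical helper: what a probe of an Entry* e must verify.
    static bool EntryMatches(StubCache::Entry* e, String* name,
                             Map* map, Code::Flags flags) {
      uint32_t entry_flags =
          static_cast<uint32_t>(e->value->flags()) &
          ~Code::kFlagsNotUsedInLookup;  // masking assumed to mirror the probe
      return e->key == name &&
             e->map == map &&            // new with this change
             entry_flags == static_cast<uint32_t>(flags);
    }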
| 73 | 74 |
| 74 void Initialize(bool create_heap_objects); | 75 void Initialize(bool create_heap_objects); |
| 75 | 76 |
| 76 | 77 |
| 77 // Computes the right stub matching. Inserts the result in the | 78 // Computes the right stub matching. Inserts the result in the |
| 78 // cache before returning. This might compile a stub if needed. | 79 // cache before returning. This might compile a stub if needed. |
| 79 Handle<Code> ComputeLoadNonexistent(Handle<String> name, | 80 Handle<Code> ComputeLoadNonexistent(Handle<String> name, |
| 80 Handle<JSObject> receiver); | 81 Handle<JSObject> receiver); |
| 81 | 82 |
| (...skipping 163 matching lines...) |
| 245 // Clear the lookup table (@ mark compact collection). | 246 // Clear the lookup table (@ mark compact collection). |
| 246 void Clear(); | 247 void Clear(); |
| 247 | 248 |
| 248 // Collect all maps that match the name and flags. | 249 // Collect all maps that match the name and flags. |
| 249 void CollectMatchingMaps(SmallMapList* types, | 250 void CollectMatchingMaps(SmallMapList* types, |
| 250 String* name, | 251 String* name, |
| 251 Code::Flags flags, | 252 Code::Flags flags, |
| 252 Handle<Context> global_context); | 253 Handle<Context> global_context); |
| 253 | 254 |
| 254 // Generate code for probing the stub cache table. | 255 // Generate code for probing the stub cache table. |
| 255 // Arguments extra and extra2 may be used to pass additional scratch | 256 // Arguments extra, extra2 and extra3 may be used to pass additional scratch |
| 256 // registers. Set to no_reg if not needed. | 257 // registers. Set to no_reg if not needed. |
| 257 void GenerateProbe(MacroAssembler* masm, | 258 void GenerateProbe(MacroAssembler* masm, |
| 258 Code::Flags flags, | 259 Code::Flags flags, |
| 259 Register receiver, | 260 Register receiver, |
| 260 Register name, | 261 Register name, |
| 261 Register scratch, | 262 Register scratch, |
| 262 Register extra, | 263 Register extra, |
| 263 Register extra2 = no_reg); | 264 Register extra2 = no_reg, |
| 265 Register extra3 = no_reg); |
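A hedged sketch of a call site for the widened signature (the IC-generator context and register names are assumed): architectures with spare registers pass real scratch registers for extra/extra2/extra3, while others keep the no_reg defaults:

    // Inside an architecture-specific IC generator (context assumed):
    masm->isolate()->stub_cache()->GenerateProbe(
        masm, flags, receiver_reg, name_reg, scratch_reg,
        extra_reg,        // first extra scratch register
        no_reg, no_reg);  // extra2/extra3 unused on this target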
| 264 | 266 |
| 265 enum Table { | 267 enum Table { |
| 266 kPrimary, | 268 kPrimary, |
| 267 kSecondary | 269 kSecondary |
| 268 }; | 270 }; |
| 269 | 271 |
| 270 | 272 |
| 271 SCTableReference key_reference(StubCache::Table table) { | 273 SCTableReference key_reference(StubCache::Table table) { |
| 272 return SCTableReference( | 274 return SCTableReference( |
| 273 reinterpret_cast<Address>(&first_entry(table)->key)); | 275 reinterpret_cast<Address>(&first_entry(table)->key)); |
| 274 } | 276 } |
| 275 | 277 |
| 276 | 278 |
| 279 SCTableReference map_reference(StubCache::Table table) { |
| 280 return SCTableReference( |
| 281 reinterpret_cast<Address>(&first_entry(table)->map)); |
| 282 } |
| 283 |
| 284 |
| 277 SCTableReference value_reference(StubCache::Table table) { | 285 SCTableReference value_reference(StubCache::Table table) { |
| 278 return SCTableReference( | 286 return SCTableReference( |
| 279 reinterpret_cast<Address>(&first_entry(table)->value)); | 287 reinterpret_cast<Address>(&first_entry(table)->value)); |
| 280 } | 288 } |
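The new map_reference parallels key_reference and value_reference: each returns the address of that field in the first entry, and the generated probe adds the same scaled entry offset to each base. Roughly, in pseudo-code (the exact load sequence is per-architecture):

    // For a probe of `table` at scaled byte offset `off` (sketch):
    //   candidate_key  = Memory[key_reference(table).address()   + off];
    //   candidate_map  = Memory[map_reference(table).address()   + off];  // new
    //   candidate_code = Memory[value_reference(table).address() + off];
    // A hit requires candidate_key == name, candidate_map ==
    // receiver->map(), and candidate_code's flags to match the lookup.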
| 281 | 289 |
| 282 | 290 |
| 283 StubCache::Entry* first_entry(StubCache::Table table) { | 291 StubCache::Entry* first_entry(StubCache::Table table) { |
| 284 switch (table) { | 292 switch (table) { |
| 285 case StubCache::kPrimary: return StubCache::primary_; | 293 case StubCache::kPrimary: return StubCache::primary_; |
| 286 case StubCache::kSecondary: return StubCache::secondary_; | 294 case StubCache::kSecondary: return StubCache::secondary_; |
| (...skipping 34 matching lines...) |
| 321 (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup); | 329 (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup); |
| 322 // Base the offset on a simple combination of name, flags, and map. | 330 // Base the offset on a simple combination of name, flags, and map. |
| 323 uint32_t key = (map_low32bits + field) ^ iflags; | 331 uint32_t key = (map_low32bits + field) ^ iflags; |
| 324 return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize); | 332 return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize); |
| 325 } | 333 } |
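A worked example of the masking with hypothetical inputs (the 2-bit heap-object tag is an assumption; kPrimaryTableSize = 2048 comes from the constants below):

    // Suppose map_low32bits + field = 0x9E3779B9 and iflags = 0x00001234:
    //   key        = 0x9E3779B9 ^ 0x00001234 = 0x9E376B8D
    //   mask       = (2048 - 1) << 2         = 0x00001FFC
    //   key & mask                           = 0x00000B8C
    // The result keeps the low zero bits, so generated code can add it to
    // the table base without shifting again.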
| 326 | 334 |
| 327 static int SecondaryOffset(String* name, Code::Flags flags, int seed) { | 335 static int SecondaryOffset(String* name, Code::Flags flags, int seed) { |
| 328 // Use the seed from the primary cache in the secondary cache. | 336 // Use the seed from the primary cache in the secondary cache. |
| 329 uint32_t string_low32bits = | 337 uint32_t string_low32bits = |
| 330 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)); | 338 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)); |
| 331 uint32_t key = seed - string_low32bits + flags; | 339 // We always set the in_loop bit to zero when generating the lookup code |
| 340 // so do it here too so the hash codes match. |
| 341 uint32_t iflags = |
| 342 (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup); |
| 343 uint32_t key = (seed - string_low32bits) + iflags; |
| 332 return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize); | 344 return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize); |
| 333 } | 345 } |
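The substantive fix in this hunk: the old C++ hash used the raw flags, while the generated lookup code (per the new comment above) clears the bits in Code::kFlagsNotUsedInLookup, so the two could pick different buckets for the same lookup. After the change the masking is idempotent, which this hedged check illustrates:

    // Sketch: masking the input flags no longer changes the bucket.
    Code::Flags masked = static_cast<Code::Flags>(
        static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
    ASSERT(SecondaryOffset(name, flags, seed) ==
           SecondaryOffset(name, masked, seed));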
| 334 | 346 |
| 335 // Compute the entry for a given offset in exactly the same way as | 347 // Compute the entry for a given offset in exactly the same way as |
| 336 // we do in generated code. We generate a hash code that already | 348 // we do in generated code. We generate a hash code that already |
| 337 // ends in String::kHashShift 0s. Then we shift it so it is a multiple | 349 // ends in String::kHashShift 0s. Then we multiply it so it is a multiple |
| 338 // of sizeof(Entry). This makes it easier to avoid making mistakes | 350 // of sizeof(Entry). This makes it easier to avoid making mistakes |
| 339 // in the hashed offset computations. | 351 // in the hashed offset computations. |
| 340 static Entry* entry(Entry* table, int offset) { | 352 static Entry* entry(Entry* table, int offset) { |
| 341 const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift; | 353 const int multiplier = sizeof(*table) >> String::kHashShift; |
| 342 return reinterpret_cast<Entry*>( | 354 return reinterpret_cast<Entry*>( |
| 343 reinterpret_cast<Address>(table) + (offset << shift_amount)); | 355 reinterpret_cast<Address>(table) + offset * multiplier); |
| 344 } | 356 } |
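Why the shift became a multiply: with the third field, sizeof(Entry) is three pointers and no longer a power of two, so the offset can no longer be scaled by a plain shift. The multiplier works because the offset already ends in String::kHashShift zero bits. A worked example with assumed values (64-bit pointers, String::kHashShift == 2):

    // sizeof(Entry) = 3 * 8 = 24 bytes, multiplier = 24 >> 2 = 6.
    // Entry index 8 arrives as offset = 8 << 2 = 32, and
    //   table + 32 * 6 = table + 192 = table + 8 * sizeof(Entry),
    // which is exactly the address of the ninth entry.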
| 345 | 357 |
| 346 static const int kPrimaryTableBits = 11; | 358 static const int kPrimaryTableBits = 11; |
| 347 static const int kPrimaryTableSize = (1 << kPrimaryTableBits); | 359 static const int kPrimaryTableSize = (1 << kPrimaryTableBits); |
| 348 static const int kSecondaryTableBits = 9; | 360 static const int kSecondaryTableBits = 9; |
| 349 static const int kSecondaryTableSize = (1 << kSecondaryTableBits); | 361 static const int kSecondaryTableSize = (1 << kSecondaryTableBits); |
| 350 | 362 |
| 351 Entry primary_[kPrimaryTableSize]; | 363 Entry primary_[kPrimaryTableSize]; |
| 352 Entry secondary_[kSecondaryTableSize]; | 364 Entry secondary_[kSecondaryTableSize]; |
| 353 Isolate* isolate_; | 365 Isolate* isolate_; |
| (...skipping 513 matching lines...) |
| 867 Handle<JSFunction> constant_function_; | 879 Handle<JSFunction> constant_function_; |
| 868 bool is_simple_api_call_; | 880 bool is_simple_api_call_; |
| 869 Handle<FunctionTemplateInfo> expected_receiver_type_; | 881 Handle<FunctionTemplateInfo> expected_receiver_type_; |
| 870 Handle<CallHandlerInfo> api_call_info_; | 882 Handle<CallHandlerInfo> api_call_info_; |
| 871 }; | 883 }; |
| 872 | 884 |
| 873 | 885 |
| 874 } } // namespace v8::internal | 886 } } // namespace v8::internal |
| 875 | 887 |
| 876 #endif // V8_STUB_CACHE_H_ | 888 #endif // V8_STUB_CACHE_H_ |