OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 6505 matching lines...)
6516 int KeyedLookupCache::Hash(Map* map, String* name) { | 6516 int KeyedLookupCache::Hash(Map* map, String* name) { |
6517 // Uses only lower 32 bits if pointers are larger. | 6517 // Uses only lower 32 bits if pointers are larger. |
6518 uintptr_t addr_hash = | 6518 uintptr_t addr_hash = |
6519 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift; | 6519 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift; |
6520 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask); | 6520 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask); |
6521 } | 6521 } |
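As context for the bucketed probing below: Hash() folds the low 32 bits of the map pointer together with the symbol's hash and masks to the cache capacity; Lookup() and Update() then clear the low bits with kHashMask so the index lands on the first slot of a bucket. A standalone sketch of that arithmetic follows; the constant values here are assumptions for illustration, and the real definitions live in V8's heap.h.

#include <cstdint>
#include <cstdio>

// Assumed values for illustration only; see heap.h for the real constants.
static const int kLength = 256;
static const int kCapacityMask = kLength - 1;
static const int kMapHashShift = 5;
static const int kEntriesPerBucket = 4;
static const int kHashMask = kCapacityMask & ~(kEntriesPerBucket - 1);

static int Hash(uintptr_t map_addr, uint32_t name_hash) {
  // Uses only lower 32 bits if pointers are larger, as in the code above.
  uintptr_t addr_hash = static_cast<uint32_t>(map_addr) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name_hash) & kCapacityMask);
}

int main() {
  int raw = Hash(0x12345678u, 0x9abcdef0u);
  // Masking with kHashMask rounds the slot down to its bucket's first entry.
  printf("raw slot %d -> bucket start %d\n", raw, raw & kHashMask);
  return 0;
}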
6522 | 6522 |
6523 | 6523 |
6524 int KeyedLookupCache::Lookup(Map* map, String* name) { | 6524 int KeyedLookupCache::Lookup(Map* map, String* name) { |
6525 int index = (Hash(map, name) & kHashMask); | 6525 int index = (Hash(map, name) & kHashMask); |
6526 Key& key = keys_[index]; | 6526 for (int i = 0; i < kEntriesPerBucket; i++) { |
6527 if ((key.map == map) && key.name->Equals(name)) { | 6527 Key& key = keys_[index + i]; |
6528 return field_offsets_[index]; | 6528 if ((key.map == map) && key.name->Equals(name)) { |
6529 } | 6529 return field_offsets_[index + i]; |
6530 ASSERT(kEntriesPerBucket == 2); // There are two entries to check. | 6530 } |
6531 // First entry in the bucket missed, check the second. | |
6532 Key& key2 = keys_[index + 1]; | |
6533 if ((key2.map == map) && key2.name->Equals(name)) { | |
6534 return field_offsets_[index + 1]; | |
6535 } | 6531 } |
6536 return kNotFound; | 6532 return kNotFound; |
6537 } | 6533 } |
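The new Lookup() replaces the two hardcoded probes with a loop over all kEntriesPerBucket slots. Below is a minimal self-contained model of that probe loop; the simplified Key type and the sizes are assumptions (the real cache keys V8 Map* and String* objects and compares names with String::Equals).

#include <cstring>

static const int kLength = 256;          // assumed
static const int kEntriesPerBucket = 4;  // assumed
static const int kNotFound = -1;

struct Key {
  const void* map;
  const char* name;
};

static Key keys_[kLength];
static int field_offsets_[kLength];

// bucket_start is Hash(map, name) & kHashMask, i.e. bucket-aligned.
static int Lookup(const void* map, const char* name, int bucket_start) {
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[bucket_start + i];
    if (key.map == map && key.name != NULL && strcmp(key.name, name) == 0) {
      return field_offsets_[bucket_start + i];
    }
  }
  return kNotFound;
}

int main() {
  const void* map = &keys_;  // any stable address serves as a dummy map
  keys_[8].map = map;        // slot 8 is bucket-aligned for width 4
  keys_[8].name = "x";
  field_offsets_[8] = 42;
  return Lookup(map, "x", 8) == 42 ? 0 : 1;
}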
6538 | 6534 |
6539 | 6535 |
6540 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) { | 6536 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) { |
6541 String* symbol; | 6537 String* symbol; |
6542 if (HEAP->LookupSymbolIfExists(name, &symbol)) { | 6538 if (HEAP->LookupSymbolIfExists(name, &symbol)) { |
6543 int index = (Hash(map, symbol) & kHashMask); | 6539 int index = (Hash(map, symbol) & kHashMask); |
| 6540 // After a GC there will be free slots, so we use them in order (this may |
| 6541 // help to get the most frequently used one in position 0). |
| 6542 for (int i = 0; i < kEntriesPerBucket; i++) { |
| 6543 Key& key = keys_[index + i]; |
| 6544 Object* free_entry_indicator = NULL; |
| 6545 if (key.map == free_entry_indicator) { |
| 6546 key.map = map; |
| 6547 key.name = symbol; |
| 6548 field_offsets_[index + i] = field_offset; |
| 6549 return; |
| 6550 } |
| 6551 } |
| 6552 // No free entry found in this bucket, so we move them all down one and |
| 6553 // put the new entry at position zero. |
| 6554 for (int i = kEntriesPerBucket - 1; i > 0; i--) { |
| 6555 Key& key = keys_[index + i]; |
| 6556 Key& key2 = keys_[index + i - 1]; |
| 6557 key = key2; |
| 6558 field_offsets_[index + i] = field_offsets_[index + i - 1]; |
| 6559 } |
| 6560 |
| 6561 // Write the new first entry. |
6544 Key& key = keys_[index]; | 6562 Key& key = keys_[index]; |
6545 Key& key2 = keys_[index + 1]; // Second entry in the bucket. | |
6546 // Demote the first entry to the second in the bucket. | |
6547 key2.map = key.map; | |
6548 key2.name = key.name; | |
6549 field_offsets_[index + 1] = field_offsets_[index]; | |
6550 // Write the new first entry. | |
6551 key.map = map; | 6563 key.map = map; |
6552 key.name = symbol; | 6564 key.name = symbol; |
6553 field_offsets_[index] = field_offset; | 6565 field_offsets_[index] = field_offset; |
6554 } | 6566 } |
6555 } | 6567 } |
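The rewritten Update() implements a simple replacement policy: slots cleared by a GC (map == NULL, set in Clear() below) are reused front to back, and a full bucket shifts every entry down one slot so the newest entry is probed first. A minimal sketch of that policy, under the same assumed types and sizes as the Lookup sketch above:

#include <cstddef>

static const int kLength = 256;          // assumed
static const int kEntriesPerBucket = 4;  // assumed

struct Key {
  const void* map;   // NULL marks a free slot, as in Clear() below
  const void* name;
};

static Key keys_[kLength];
static int field_offsets_[kLength];

static void Update(const void* map, const void* name, int field_offset,
                   int bucket_start) {
  // Reuse free slots in order so hot entries tend toward position 0.
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[bucket_start + i];
    if (key.map == NULL) {
      key.map = map;
      key.name = name;
      field_offsets_[bucket_start + i] = field_offset;
      return;
    }
  }
  // Full bucket: demote every entry by one slot, dropping the last.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    keys_[bucket_start + i] = keys_[bucket_start + i - 1];
    field_offsets_[bucket_start + i] = field_offsets_[bucket_start + i - 1];
  }
  // Insert the new entry at the front of the bucket.
  keys_[bucket_start].map = map;
  keys_[bucket_start].name = name;
  field_offsets_[bucket_start] = field_offset;
}

This gives an insertion-ordered pseudo-LRU within each bucket, at the cost of a few copies when a bucket is full.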
6556 | 6568 |
6557 | 6569 |
6558 void KeyedLookupCache::Clear() { | 6570 void KeyedLookupCache::Clear() { |
6559 for (int index = 0; index < kLength; index++) keys_[index].map = NULL; | 6571 for (int index = 0; index < kLength; index++) keys_[index].map = NULL; |
6560 } | 6572 } |
(...skipping 114 matching lines...)
6675 isolate_->heap()->store_buffer()->Compact(); | 6687 isolate_->heap()->store_buffer()->Compact(); |
6676 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED); | 6688 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED); |
6677 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { | 6689 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { |
6678 next = chunk->next_chunk(); | 6690 next = chunk->next_chunk(); |
6679 isolate_->memory_allocator()->Free(chunk); | 6691 isolate_->memory_allocator()->Free(chunk); |
6680 } | 6692 } |
6681 chunks_queued_for_free_ = NULL; | 6693 chunks_queued_for_free_ = NULL; |
6682 } | 6694 } |
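One note on the loop above: it reads next_chunk() before Free(chunk), because the link dies with the chunk. A generic sketch of that capture-next-before-free pattern, over an assumed singly linked node type standing in for MemoryChunk:

#include <cstdlib>

struct Chunk {   // stand-in for MemoryChunk's intrusive list
  Chunk* next;
};

static void FreeAll(Chunk* head) {
  Chunk* next = NULL;
  for (Chunk* chunk = head; chunk != NULL; chunk = next) {
    next = chunk->next;  // capture the link before freeing the node
    free(chunk);
  }
}

int main() {
  Chunk* head = NULL;
  for (int i = 0; i < 3; i++) {  // build a small list to free
    Chunk* c = static_cast<Chunk*>(malloc(sizeof(Chunk)));
    c->next = head;
    head = c;
  }
  FreeAll(head);
  return 0;
}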
6683 | 6695 |
6684 } } // namespace v8::internal | 6696 } } // namespace v8::internal |