| Index: src/stub-cache.h
|
| ===================================================================
|
| --- src/stub-cache.h (revision 10860)
|
| +++ src/stub-cache.h (working copy)
|
| @@ -69,6 +69,7 @@
|
| struct Entry {
|
| String* key;
|
| Code* value;
|
| + Map* map;
|
| };
|
|
|
| void Initialize(bool create_heap_objects);
|
| @@ -252,7 +253,7 @@
|
| Handle<Context> global_context);
|
|
|
| // Generate code for probing the stub cache table.
|
| - // Arguments extra and extra2 may be used to pass additional scratch
|
| + // Arguments extra, extra2 and extra3 may be used to pass additional scratch
|
| // registers. Set to no_reg if not needed.
|
| void GenerateProbe(MacroAssembler* masm,
|
| Code::Flags flags,
|
| @@ -260,7 +261,8 @@
|
| Register name,
|
| Register scratch,
|
| Register extra,
|
| - Register extra2 = no_reg);
|
| + Register extra2 = no_reg,
|
| + Register extra3 = no_reg);
|
|
|
| enum Table {
|
| kPrimary,
|
| @@ -274,6 +276,12 @@
|
| }
|
|
|
|
|
| + SCTableReference map_reference(StubCache::Table table) {
|
| + return SCTableReference(
|
| + reinterpret_cast<Address>(&first_entry(table)->map));
|
| + }
|
| +
|
| +
|
| SCTableReference value_reference(StubCache::Table table) {
|
| return SCTableReference(
|
| reinterpret_cast<Address>(&first_entry(table)->value));
|
| @@ -328,19 +336,23 @@
|
| // Use the seed from the primary cache in the secondary cache.
|
| uint32_t string_low32bits =
|
| static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
|
| - uint32_t key = seed - string_low32bits + flags;
|
| + // We always set the in_loop bit to zero when generating the lookup code,
|
| + // so do the same here so that the hash codes match.
|
| + uint32_t iflags =
|
| + (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
|
| + uint32_t key = (seed - string_low32bits) + iflags;
|
| return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
|
| }
|
|
|
| // Compute the entry for a given offset in exactly the same way as
|
| // we do in generated code. We generate an hash code that already
|
| - // ends in String::kHashShift 0s. Then we shift it so it is a multiple
|
| + // ends in String::kHashShift 0s. Then we multiply it so it is a multiple
|
| // of sizeof(Entry). This makes it easier to avoid making mistakes
|
| // in the hashed offset computations.
|
| static Entry* entry(Entry* table, int offset) {
|
| - const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
|
| + const int multiplier = sizeof(*table) >> String::kHashShift;
|
| return reinterpret_cast<Entry*>(
|
| - reinterpret_cast<Address>(table) + (offset << shift_amount));
|
| + reinterpret_cast<Address>(table) + offset * multiplier);
|
| }
|
|
|
| static const int kPrimaryTableBits = 11;
|
|
|