OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 25 matching lines...) | |
36 namespace v8 { | 36 namespace v8 { |
37 namespace internal { | 37 namespace internal { |
38 | 38 |
39 #define __ ACCESS_MASM(masm) | 39 #define __ ACCESS_MASM(masm) |
40 | 40 |
41 | 41 |
42 static void ProbeTable(Isolate* isolate, | 42 static void ProbeTable(Isolate* isolate, |
43 MacroAssembler* masm, | 43 MacroAssembler* masm, |
44 Code::Flags flags, | 44 Code::Flags flags, |
45 StubCache::Table table, | 45 StubCache::Table table, |
46 Register receiver, | |
46 Register name, | 47 Register name, |
47 Register offset, | 48 Register offset, |
48 int offset_shift_bits, | |
49 Register scratch, | 49 Register scratch, |
50 Register scratch2) { | 50 Register scratch2, |
51 Register offset_scratch) { | |
51 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); | 52 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); |
52 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); | 53 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); |
54 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); | |
53 | 55 |
54 uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); | 56 uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); |
55 uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); | 57 uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); |
58 uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); | |
56 | 59 |
57 // Check the relative positions of the address fields. | 60 // Check the relative positions of the address fields. |
58 ASSERT(value_off_addr > key_off_addr); | 61 ASSERT(value_off_addr > key_off_addr); |
59 ASSERT((value_off_addr - key_off_addr) % 4 == 0); | 62 ASSERT((value_off_addr - key_off_addr) % 4 == 0); |
60 ASSERT((value_off_addr - key_off_addr) < (256 * 4)); | 63 ASSERT((value_off_addr - key_off_addr) < (256 * 4)); |
64 ASSERT(map_off_addr > key_off_addr); | |
65 ASSERT((map_off_addr - key_off_addr) % 4 == 0); | |
66 ASSERT((map_off_addr - key_off_addr) < (256 * 4)); | |
61 | 67 |
62 Label miss; | 68 Label miss; |
63 Register offsets_base_addr = scratch; | 69 Register base_addr = scratch; |
70 scratch = no_reg; | |
71 | |
72 // Multiply by 3. | |
73 __ add(offset_scratch, offset, Operand(offset, LSL, 1)); | |
74 | |
75 // Calculate the base address of the entry. | |
76 __ mov(base_addr, Operand(key_offset)); | |
77 __ add(base_addr, base_addr, Operand(offset_scratch, LSL, 1)); | |
64 | 78 |
65 // Check that the key in the entry matches the name. | 79 // Check that the key in the entry matches the name. |
66 __ mov(offsets_base_addr, Operand(key_offset)); | 80 __ ldr(ip, MemOperand(base_addr, 0)); |
67 __ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits)); | |
68 __ cmp(name, ip); | 81 __ cmp(name, ip); |
69 __ b(ne, &miss); | 82 __ b(ne, &miss); |
70 | 83 |
84 // Check the map matches. | |
85 __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr)); | |
86 __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
87 __ cmp(ip, scratch2); | |
88 __ b(ne, &miss); | |
89 | |
71 // Get the code entry from the cache. | 90 // Get the code entry from the cache. |
72 __ add(offsets_base_addr, offsets_base_addr, | 91 Register code = scratch2; |
73 Operand(value_off_addr - key_off_addr)); | 92 scratch2 = no_reg; |
74 __ ldr(scratch2, | 93 __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr)); |
75 MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits)); | |
76 | 94 |
77 // Check that the flags match what we're looking for. | 95 // Check that the flags match what we're looking for. |
78 __ ldr(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset)); | 96 Register flags_reg = base_addr; |
97 base_addr = no_reg; | |
98 __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); | |
79 // It's a nice optimization if this constant is encodable in the bic insn. | 99 // It's a nice optimization if this constant is encodable in the bic insn. |
80 | 100 |
81 uint32_t mask = Code::kFlagsNotUsedInLookup; | 101 uint32_t mask = Code::kFlagsNotUsedInLookup; |
82 ASSERT(__ ImmediateFitsAddrMode1Instruction(mask)); | 102 ASSERT(__ ImmediateFitsAddrMode1Instruction(mask)); |
83 __ bic(scratch2, scratch2, Operand(mask)); | 103 __ bic(flags_reg, flags_reg, Operand(mask)); |
84 // Using cmn and the negative instead of cmp means we can use movw. | 104 // Using cmn and the negative instead of cmp means we can use movw. |
85 if (flags < 0) { | 105 if (flags < 0) { |
86 __ cmn(scratch2, Operand(-flags)); | 106 __ cmn(flags_reg, Operand(-flags)); |
87 } else { | 107 } else { |
88 __ cmp(scratch2, Operand(flags)); | 108 __ cmp(flags_reg, Operand(flags)); |
89 } | 109 } |
90 __ b(ne, &miss); | 110 __ b(ne, &miss); |
91 | 111 |
92 // Re-load code entry from cache. | 112 #ifdef DEBUG |
93 __ ldr(offset, | 113 if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { |
94 MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits)); | 114 __ jmp(&miss); |
115 } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { | |
116 __ jmp(&miss); | |
117 } | |
118 #endif | |
95 | 119 |
96 // Jump to the first instruction in the code stub. | 120 // Jump to the first instruction in the code stub. |
97 __ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag)); | 121 __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag)); |
98 __ Jump(offset); | |
99 | 122 |
100 // Miss: fall through. | 123 // Miss: fall through. |
101 __ bind(&miss); | 124 __ bind(&miss); |
102 } | 125 } |
103 | 126 |
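A note to make the new addressing arithmetic concrete, and to spell out the scale factor queried in the review thread further down: each entry now holds three words (key, value, map), so sizeof(Entry) == 12. GenerateProbe masks the hash so it is a multiple of 4 (the bits below kHeapObjectTagSize are clear) and then shifts right by one, so ProbeTable receives offset == 2 * index; multiplying by 3 and then shifting left by 1 therefore lands on index * 12. A minimal C++ sketch of that arithmetic, using stand-in types rather than the real v8::internal declarations:

  #include <cstdint>

  // Stand-in for the real v8::internal types; sketch only.
  struct Entry { void* key; void* value; void* map; };  // 12 bytes on 32-bit ARM

  // 'offset' is what GenerateProbe passes in: the masked hash shifted right
  // by one, i.e. 2 * index, since the masked hash is index * 4.
  static Entry* EntryAddress(uint8_t* key_base, uint32_t offset) {
    // add(offset_scratch, offset, Operand(offset, LSL, 1)): offset * 3.
    uint32_t times3 = offset + (offset << 1);
    // (2 * index) * 3, shifted left by 1: index * 12 == index * sizeof(Entry).
    return reinterpret_cast<Entry*>(key_base + (times3 << 1));
  }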
104 | 127 |
105 // Helper function used to check that the dictionary doesn't contain | 128 // Helper function used to check that the dictionary doesn't contain |
106 // the property. This function may return false negatives, so miss_label | 129 // the property. This function may return false negatives, so miss_label |
107 // must always call a backup property check that is complete. | 130 // must always call a backup property check that is complete. |
108 // This function is safe to call if the receiver has fast properties. | 131 // This function is safe to call if the receiver has fast properties. |
(...skipping 51 matching lines...) | |
160 __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); | 183 __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
161 } | 184 } |
162 | 185 |
163 | 186 |
164 void StubCache::GenerateProbe(MacroAssembler* masm, | 187 void StubCache::GenerateProbe(MacroAssembler* masm, |
165 Code::Flags flags, | 188 Code::Flags flags, |
166 Register receiver, | 189 Register receiver, |
167 Register name, | 190 Register name, |
168 Register scratch, | 191 Register scratch, |
169 Register extra, | 192 Register extra, |
170 Register extra2) { | 193 Register extra2, |
194 Register extra3) { | |
171 Isolate* isolate = masm->isolate(); | 195 Isolate* isolate = masm->isolate(); |
172 Label miss; | 196 Label miss; |
173 | 197 |
174 // Make sure that code is valid. The shifting code relies on the | 198 // Make sure that code is valid. The multiplying code relies on the |
175 // entry size being 8. | 199 // entry size being 12. |
Sven Panne 2012/02/29 09:37:42:
Hmmm, I can see multiplication by 3 and a left shi…
Erik Corry 2012/02/29 10:45:59:
Done.
176 ASSERT(sizeof(Entry) == 8); | 200 ASSERT(sizeof(Entry) == 12); |
177 | 201 |
178 // Make sure the flags do not name a specific type. | 202 // Make sure the flags do not name a specific type. |
179 ASSERT(Code::ExtractTypeFromFlags(flags) == 0); | 203 ASSERT(Code::ExtractTypeFromFlags(flags) == 0); |
180 | 204 |
181 // Make sure that there are no register conflicts. | 205 // Make sure that there are no register conflicts. |
182 ASSERT(!scratch.is(receiver)); | 206 ASSERT(!scratch.is(receiver)); |
183 ASSERT(!scratch.is(name)); | 207 ASSERT(!scratch.is(name)); |
184 ASSERT(!extra.is(receiver)); | 208 ASSERT(!extra.is(receiver)); |
185 ASSERT(!extra.is(name)); | 209 ASSERT(!extra.is(name)); |
186 ASSERT(!extra.is(scratch)); | 210 ASSERT(!extra.is(scratch)); |
187 ASSERT(!extra2.is(receiver)); | 211 ASSERT(!extra2.is(receiver)); |
188 ASSERT(!extra2.is(name)); | 212 ASSERT(!extra2.is(name)); |
189 ASSERT(!extra2.is(scratch)); | 213 ASSERT(!extra2.is(scratch)); |
190 ASSERT(!extra2.is(extra)); | 214 ASSERT(!extra2.is(extra)); |
191 | 215 |
192 // Check scratch, extra and extra2 registers are valid. | 216 // Check scratch, extra and extra2 registers are valid. |
193 ASSERT(!scratch.is(no_reg)); | 217 ASSERT(!scratch.is(no_reg)); |
194 ASSERT(!extra.is(no_reg)); | 218 ASSERT(!extra.is(no_reg)); |
195 ASSERT(!extra2.is(no_reg)); | 219 ASSERT(!extra2.is(no_reg)); |
220 ASSERT(!extra3.is(no_reg)); | |
221 | |
222 Counters* counters = masm->isolate()->counters(); | |
223 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, | |
224 extra2, extra3); | |
196 | 225 |
197 // Check that the receiver isn't a smi. | 226 // Check that the receiver isn't a smi. |
198 __ JumpIfSmi(receiver, &miss); | 227 __ JumpIfSmi(receiver, &miss); |
199 | 228 |
200 // Get the map of the receiver and compute the hash. | 229 // Get the map of the receiver and compute the hash. |
201 __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset)); | 230 __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset)); |
202 __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 231 __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
203 __ add(scratch, scratch, Operand(ip)); | 232 __ add(scratch, scratch, Operand(ip)); |
204 uint32_t mask = (kPrimaryTableSize - 1) << kHeapObjectTagSize; | 233 uint32_t mask = (kPrimaryTableSize - 1) << kHeapObjectTagSize; |
205 // Mask down the eor argument to the minimum to keep the immediate | 234 // Mask down the eor argument to the minimum to keep the immediate |
206 // ARM-encodable. | 235 // ARM-encodable. |
207 __ eor(scratch, scratch, Operand(flags & mask)); | 236 __ eor(scratch, scratch, Operand(flags & mask)); |
208 // Prefer and_ to ubfx here because ubfx takes 2 cycles. | 237 // Prefer and_ to ubfx here because ubfx takes 2 cycles. |
209 __ and_(scratch, scratch, Operand(mask)); | 238 __ and_(scratch, scratch, Operand(mask)); |
210 __ mov(scratch, Operand(scratch, LSR, 1)); | 239 __ mov(scratch, Operand(scratch, LSR, 1)); |
211 | 240 |
212 // Probe the primary table. | 241 // Probe the primary table. |
213 ProbeTable(isolate, | 242 ProbeTable(isolate, |
214 masm, | 243 masm, |
215 flags, | 244 flags, |
216 kPrimary, | 245 kPrimary, |
246 receiver, | |
217 name, | 247 name, |
218 scratch, | 248 scratch, |
219 1, | |
220 extra, | 249 extra, |
221 extra2); | 250 extra2, |
251 extra3); | |
222 | 252 |
223 // Primary miss: Compute hash for secondary probe. | 253 // Primary miss: Compute hash for secondary probe. |
224 __ sub(scratch, scratch, Operand(name, LSR, 1)); | 254 __ sub(scratch, scratch, Operand(name, LSR, 1)); |
225 uint32_t mask2 = (kSecondaryTableSize - 1) << (kHeapObjectTagSize - 1); | 255 uint32_t mask2 = (kSecondaryTableSize - 1) << (kHeapObjectTagSize - 1); |
226 __ add(scratch, scratch, Operand((flags >> 1) & mask2)); | 256 __ add(scratch, scratch, Operand((flags >> 1) & mask2)); |
227 __ and_(scratch, scratch, Operand(mask2)); | 257 __ and_(scratch, scratch, Operand(mask2)); |
228 | 258 |
229 // Probe the secondary table. | 259 // Probe the secondary table. |
230 ProbeTable(isolate, | 260 ProbeTable(isolate, |
231 masm, | 261 masm, |
232 flags, | 262 flags, |
233 kSecondary, | 263 kSecondary, |
264 receiver, | |
234 name, | 265 name, |
235 scratch, | 266 scratch, |
236 1, | |
237 extra, | 267 extra, |
238 extra2); | 268 extra2, |
269 extra3); | |
239 | 270 |
240 // Cache miss: Fall-through and let caller handle the miss by | 271 // Cache miss: Fall-through and let caller handle the miss by |
241 // entering the runtime system. | 272 // entering the runtime system. |
242 __ bind(&miss); | 273 __ bind(&miss); |
274 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, | |
275 extra2, extra3); | |
243 } | 276 } |
244 | 277 |
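For reference, the two hash computations above expressed in C++; this is a sketch that mirrors the masm sequences line for line, with illustrative table sizes rather than the real header constants:

  #include <cstdint>

  // Illustrative values; the real constants live in the stub cache headers.
  static const uint32_t kPrimaryTableSize = 2048;
  static const uint32_t kSecondaryTableSize = 512;
  static const uint32_t kHeapObjectTagSize = 2;

  // Primary probe: string hash field plus map address, xor'ed with the
  // masked flags, masked to the table, then halved (so ProbeTable's
  // multiply-by-6 yields entry index * 12).
  uint32_t PrimaryOffset(uint32_t hash_field, uint32_t map, uint32_t flags) {
    uint32_t mask = (kPrimaryTableSize - 1) << kHeapObjectTagSize;
    return (((hash_field + map) ^ (flags & mask)) & mask) >> 1;
  }

  // Secondary probe: derived from the (already halved) primary offset and
  // the name address, folding the flags in at the matching half scale.
  uint32_t SecondaryOffset(uint32_t primary, uint32_t name, uint32_t flags) {
    uint32_t mask2 = (kSecondaryTableSize - 1) << (kHeapObjectTagSize - 1);
    return (primary - (name >> 1) + ((flags >> 1) & mask2)) & mask2;
  }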
245 | 278 |
246 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, | 279 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, |
247 int index, | 280 int index, |
248 Register prototype) { | 281 Register prototype) { |
249 // Load the global or builtins object from the current context. | 282 // Load the global or builtins object from the current context. |
250 __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | 283 __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
251 // Load the global context from the global or builtins object. | 284 // Load the global context from the global or builtins object. |
252 __ ldr(prototype, | 285 __ ldr(prototype, |
(...skipping 4169 matching lines...) | |
4422 __ Jump(ic_slow, RelocInfo::CODE_TARGET); | 4455 __ Jump(ic_slow, RelocInfo::CODE_TARGET); |
4423 } | 4456 } |
4424 } | 4457 } |
4425 | 4458 |
4426 | 4459 |
4427 #undef __ | 4460 #undef __ |
4428 | 4461 |
4429 } } // namespace v8::internal | 4462 } } // namespace v8::internal |
4430 | 4463 |
4431 #endif // V8_TARGET_ARCH_ARM | 4464 #endif // V8_TARGET_ARCH_ARM |