Chromium Code Reviews

Side by Side Diff: runtime/vm/stub_code_arm.cc

Issue 1192103004: VM: New calling convention for generated code. (Closed)
Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: ARM working, x64 cleanup (created 5 years, 3 months ago)
OLD | NEW (left column: old file; right column: new file)
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" 5 #include "vm/globals.h"
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/assembler.h" 8 #include "vm/assembler.h"
9 #include "vm/code_generator.h" 9 #include "vm/code_generator.h"
10 #include "vm/cpu.h" 10 #include "vm/cpu.h"
(...skipping 24 matching lines...)
35 // SP : address of last argument in argument array. 35 // SP : address of last argument in argument array.
36 // SP + 4*R4 - 4 : address of first argument in argument array. 36 // SP + 4*R4 - 4 : address of first argument in argument array.
37 // SP + 4*R4 : address of return value. 37 // SP + 4*R4 : address of return value.
38 // R5 : address of the runtime function to call. 38 // R5 : address of the runtime function to call.
39 // R4 : number of arguments to the call. 39 // R4 : number of arguments to the call.
40 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { 40 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
41 const intptr_t thread_offset = NativeArguments::thread_offset(); 41 const intptr_t thread_offset = NativeArguments::thread_offset();
42 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); 42 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
43 const intptr_t argv_offset = NativeArguments::argv_offset(); 43 const intptr_t argv_offset = NativeArguments::argv_offset();
44 const intptr_t retval_offset = NativeArguments::retval_offset(); 44 const intptr_t retval_offset = NativeArguments::retval_offset();
45 const intptr_t exitframe_last_param_slot_from_fp = 2;
46 45
47 __ EnterStubFrame(); 46 __ EnterStubFrame();
48 47
49 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R9)) != 0); 48 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R7)) != 0);
50 __ LoadIsolate(R9); 49 __ LoadIsolate(R7);
51 50
52 // Save exit frame information to enable stack walking as we are about 51 // Save exit frame information to enable stack walking as we are about
53 // to transition to Dart VM C++ code. 52 // to transition to Dart VM C++ code.
54 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); 53 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
55 54
56 #if defined(DEBUG) 55 #if defined(DEBUG)
57 { Label ok; 56 { Label ok;
58 // Check that we are always entering from Dart code. 57 // Check that we are always entering from Dart code.
59 __ LoadFromOffset(kWord, R6, R9, Isolate::vm_tag_offset()); 58 __ LoadFromOffset(kWord, R6, R7, Isolate::vm_tag_offset());
60 __ CompareImmediate(R6, VMTag::kDartTagId); 59 __ CompareImmediate(R6, VMTag::kDartTagId);
61 __ b(&ok, EQ); 60 __ b(&ok, EQ);
62 __ Stop("Not coming from Dart code."); 61 __ Stop("Not coming from Dart code.");
63 __ Bind(&ok); 62 __ Bind(&ok);
64 } 63 }
65 #endif 64 #endif
66 65
67 // Mark that the isolate is executing VM code. 66 // Mark that the isolate is executing VM code.
68 __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset()); 67 __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset());
69 68
70 // Reserve space for arguments and align frame before entering C++ world. 69 // Reserve space for arguments and align frame before entering C++ world.
71 // NativeArguments are passed in registers. 70 // NativeArguments are passed in registers.
72 ASSERT(sizeof(NativeArguments) == 4 * kWordSize); 71 ASSERT(sizeof(NativeArguments) == 4 * kWordSize);
73 __ ReserveAlignedFrameSpace(0); 72 __ ReserveAlignedFrameSpace(0);
74 73
75 // Pass NativeArguments structure by value and call runtime. 74 // Pass NativeArguments structure by value and call runtime.
76 // Registers R0, R1, R2, and R3 are used. 75 // Registers R0, R1, R2, and R3 are used.
77 76
78 ASSERT(thread_offset == 0 * kWordSize); 77 ASSERT(thread_offset == 0 * kWordSize);
79 // Set thread in NativeArgs. 78 // Set thread in NativeArgs.
80 __ mov(R0, Operand(THR)); 79 __ mov(R0, Operand(THR));
81 80
82 // There are no runtime calls to closures, so we do not need to set the tag 81 // There are no runtime calls to closures, so we do not need to set the tag
83 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. 82 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
84 ASSERT(argc_tag_offset == 1 * kWordSize); 83 ASSERT(argc_tag_offset == 1 * kWordSize);
85 __ mov(R1, Operand(R4)); // Set argc in NativeArguments. 84 __ mov(R1, Operand(R4)); // Set argc in NativeArguments.
86 85
87 ASSERT(argv_offset == 2 * kWordSize); 86 ASSERT(argv_offset == 2 * kWordSize);
88 __ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv. 87 __ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv.
89 // Set argv in NativeArguments. 88 // Set argv in NativeArguments.
90 __ AddImmediate(R2, exitframe_last_param_slot_from_fp * kWordSize); 89 __ AddImmediate(R2, kParamEndSlotFromFp * kWordSize);
91 90
92 ASSERT(retval_offset == 3 * kWordSize); 91 ASSERT(retval_offset == 3 * kWordSize);
93 __ add(R3, R2, Operand(kWordSize)); // Retval is next to 1st argument. 92 __ add(R3, R2, Operand(kWordSize)); // Retval is next to 1st argument.
94 93
95 // Call runtime or redirection via simulator. 94 // Call runtime or redirection via simulator.
96 __ blx(R5); 95 __ blx(R5);
97 96
98 // Mark that the isolate is executing Dart code. 97 // Mark that the isolate is executing Dart code.
99 __ LoadImmediate(R2, VMTag::kDartTagId); 98 __ LoadImmediate(R2, VMTag::kDartTagId);
100 __ StoreToOffset(kWord, R2, R9, Isolate::vm_tag_offset()); 99 __ StoreToOffset(kWord, R2, R7, Isolate::vm_tag_offset());
101 100
102 // Reset exit frame information in Isolate structure. 101 // Reset exit frame information in Isolate structure.
103 __ LoadImmediate(R2, 0); 102 __ LoadImmediate(R2, 0);
104 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); 103 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
105 104
106 __ LeaveStubFrame(); 105 __ LeaveStubFrame();
107 __ Ret(); 106 __ Ret();
108 } 107 }
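The four ASSERTs above pin down the NativeArguments layout that makes this register assignment work: one word-sized field per ABI argument register. A minimal C++ sketch of that layout (field names are illustrative, not the VM's declarations):

  #include <cstdint>

  struct NativeArgumentsSketch {
    uintptr_t thread;    // thread_offset   == 0 * kWordSize -> R0
    uintptr_t argc_tag;  // argc_tag_offset == 1 * kWordSize -> R1
    uintptr_t argv;      // argv_offset     == 2 * kWordSize -> R2
    uintptr_t retval;    // retval_offset   == 3 * kWordSize -> R3
  };
  static_assert(sizeof(NativeArgumentsSketch) == 4 * sizeof(uintptr_t),
                "mirrors ASSERT(sizeof(NativeArguments) == 4 * kWordSize)");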
109 108
110 109
(...skipping 23 matching lines...)
134 // R2 : address of first argument in argument array. 133 // R2 : address of first argument in argument array.
135 // R1 : argc_tag including number of arguments and function kind. 134 // R1 : argc_tag including number of arguments and function kind.
136 void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) { 135 void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
137 const intptr_t thread_offset = NativeArguments::thread_offset(); 136 const intptr_t thread_offset = NativeArguments::thread_offset();
138 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); 137 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
139 const intptr_t argv_offset = NativeArguments::argv_offset(); 138 const intptr_t argv_offset = NativeArguments::argv_offset();
140 const intptr_t retval_offset = NativeArguments::retval_offset(); 139 const intptr_t retval_offset = NativeArguments::retval_offset();
141 140
142 __ EnterStubFrame(); 141 __ EnterStubFrame();
143 142
144 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R9)) != 0); 143 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R7)) != 0);
145 __ LoadIsolate(R9); 144 __ LoadIsolate(R7);
146 145
147 // Save exit frame information to enable stack walking as we are about 146 // Save exit frame information to enable stack walking as we are about
148 // to transition to native code. 147 // to transition to native code.
149 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); 148 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
150 149
151 #if defined(DEBUG) 150 #if defined(DEBUG)
152 { Label ok; 151 { Label ok;
153 // Check that we are always entering from Dart code. 152 // Check that we are always entering from Dart code.
154 __ LoadFromOffset(kWord, R6, R9, Isolate::vm_tag_offset()); 153 __ LoadFromOffset(kWord, R6, R7, Isolate::vm_tag_offset());
155 __ CompareImmediate(R6, VMTag::kDartTagId); 154 __ CompareImmediate(R6, VMTag::kDartTagId);
156 __ b(&ok, EQ); 155 __ b(&ok, EQ);
157 __ Stop("Not coming from Dart code."); 156 __ Stop("Not coming from Dart code.");
158 __ Bind(&ok); 157 __ Bind(&ok);
159 } 158 }
160 #endif 159 #endif
161 160
162 // Mark that the isolate is executing Native code. 161 // Mark that the isolate is executing Native code.
163 __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset()); 162 __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset());
164 163
165 // Reserve space for the native arguments structure passed on the stack (the 164 // Reserve space for the native arguments structure passed on the stack (the
166 // outgoing pointer parameter to the native arguments structure is passed in 165 // outgoing pointer parameter to the native arguments structure is passed in
167 // R0) and align frame before entering the C++ world. 166 // R0) and align frame before entering the C++ world.
168 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); 167 __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
169 168
170 // Initialize NativeArguments structure and call native function. 169 // Initialize NativeArguments structure and call native function.
171 // Registers R0, R1, R2, and R3 are used. 170 // Registers R0, R1, R2, and R3 are used.
172 171
173 ASSERT(thread_offset == 0 * kWordSize); 172 ASSERT(thread_offset == 0 * kWordSize);
174 // Set thread in NativeArgs. 173 // Set thread in NativeArgs.
175 __ mov(R0, Operand(THR)); 174 __ mov(R0, Operand(THR));
176 175
177 // There are no native calls to closures, so we do not need to set the tag 176 // There are no native calls to closures, so we do not need to set the tag
178 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. 177 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
179 ASSERT(argc_tag_offset == 1 * kWordSize); 178 ASSERT(argc_tag_offset == 1 * kWordSize);
180 // Set argc in NativeArguments: R1 already contains argc. 179 // Set argc in NativeArguments: R1 already contains argc.
181 180
182 ASSERT(argv_offset == 2 * kWordSize); 181 ASSERT(argv_offset == 2 * kWordSize);
183 // Set argv in NativeArguments: R2 already contains argv. 182 // Set argv in NativeArguments: R2 already contains argv.
184 183
185 ASSERT(retval_offset == 3 * kWordSize); 184 ASSERT(retval_offset == 3 * kWordSize);
186 __ add(R3, FP, Operand(3 * kWordSize)); // Set retval in NativeArgs. 185 // Set retval in NativeArgs.
186 __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize));
187 187
188 // Passing the structure by value as in runtime calls would require changing 188 // Passing the structure by value as in runtime calls would require changing
189 // Dart API for native functions. 189 // Dart API for native functions.
190 // For now, space is reserved on the stack and we pass a pointer to it. 190 // For now, space is reserved on the stack and we pass a pointer to it.
191 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3)); 191 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
192 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments. 192 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments.
193 193
194 __ mov(R1, Operand(R5)); // Pass the function entrypoint to call. 194 __ mov(R1, Operand(R5)); // Pass the function entrypoint to call.
195 // Call native function invocation wrapper or redirection via simulator. 195 // Call native function invocation wrapper or redirection via simulator.
196 #if defined(USING_SIMULATOR) 196 #if defined(USING_SIMULATOR)
197 uword entry = reinterpret_cast<uword>(NativeEntry::NativeCallWrapper); 197 uword entry = reinterpret_cast<uword>(NativeEntry::NativeCallWrapper);
198 entry = Simulator::RedirectExternalReference( 198 const ExternalLabel label(Simulator::RedirectExternalReference(
199 entry, Simulator::kNativeCall, NativeEntry::kNumCallWrapperArguments); 199 entry, Simulator::kNativeCall, NativeEntry::kNumCallWrapperArguments));
200 __ LoadImmediate(R2, entry); 200 __ LoadExternalLabel(R2, &label, kNotPatchable);
201 __ blx(R2); 201 __ blx(R2);
202 #else 202 #else
203 __ BranchLink(&NativeEntry::NativeCallWrapperLabel(), kNotPatchable); 203 __ LoadExternalLabel(
204 LR, &NativeEntry::NativeCallWrapperLabel(), kNotPatchable);
205 __ blx(LR);
204 #endif 206 #endif
205 207
206 // Mark that the isolate is executing Dart code. 208 // Mark that the isolate is executing Dart code.
207 __ LoadImmediate(R2, VMTag::kDartTagId); 209 __ LoadImmediate(R2, VMTag::kDartTagId);
208 __ StoreToOffset(kWord, R2, R9, Isolate::vm_tag_offset()); 210 __ StoreToOffset(kWord, R2, R7, Isolate::vm_tag_offset());
209 211
210 // Reset exit frame information in Isolate structure. 212 // Reset exit frame information in Isolate structure.
211 __ LoadImmediate(R2, 0); 213 __ LoadImmediate(R2, 0);
212 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); 214 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
213 215
214 __ LeaveStubFrame(); 216 __ LeaveStubFrame();
215 __ Ret(); 217 __ Ret();
216 } 218 }
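The stub hands the wrapper exactly two values: the NativeArguments pointer in R0 and the resolved native entrypoint in R1, matching kNumCallWrapperArguments. A hedged sketch of the wrapper's shape, with typedefs mirroring dart_api.h (the real NativeEntry::NativeCallWrapper also sets up scopes and exception handling around the call):

  typedef void* Dart_NativeArguments;
  typedef void (*Dart_NativeFunction)(Dart_NativeArguments arguments);

  // Illustrative only: receives the stack-resident NativeArguments block
  // built by the stub (R0) and the target function (R1), then invokes it.
  void NativeCallWrapperSketch(Dart_NativeArguments arguments,
                               Dart_NativeFunction func) {
    func(arguments);
  }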
217 219
218 220
219 // Input parameters: 221 // Input parameters:
220 // LR : return address. 222 // LR : return address.
221 // SP : address of return value. 223 // SP : address of return value.
222 // R5 : address of the native function to call. 224 // R5 : address of the native function to call.
223 // R2 : address of first argument in argument array. 225 // R2 : address of first argument in argument array.
224 // R1 : argc_tag including number of arguments and function kind. 226 // R1 : argc_tag including number of arguments and function kind.
225 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) { 227 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
226 const intptr_t thread_offset = NativeArguments::thread_offset(); 228 const intptr_t thread_offset = NativeArguments::thread_offset();
227 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); 229 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
228 const intptr_t argv_offset = NativeArguments::argv_offset(); 230 const intptr_t argv_offset = NativeArguments::argv_offset();
229 const intptr_t retval_offset = NativeArguments::retval_offset(); 231 const intptr_t retval_offset = NativeArguments::retval_offset();
230 232
231 __ EnterStubFrame(); 233 __ EnterStubFrame();
232 234
233 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R9)) != 0); 235 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R7)) != 0);
234 __ LoadIsolate(R9); 236 __ LoadIsolate(R7);
235 237
236 // Save exit frame information to enable stack walking as we are about 238 // Save exit frame information to enable stack walking as we are about
237 // to transition to native code. 239 // to transition to native code.
238 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); 240 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
239 241
240 #if defined(DEBUG) 242 #if defined(DEBUG)
241 { Label ok; 243 { Label ok;
242 // Check that we are always entering from Dart code. 244 // Check that we are always entering from Dart code.
243 __ LoadFromOffset(kWord, R6, R9, Isolate::vm_tag_offset()); 245 __ LoadFromOffset(kWord, R6, R7, Isolate::vm_tag_offset());
244 __ CompareImmediate(R6, VMTag::kDartTagId); 246 __ CompareImmediate(R6, VMTag::kDartTagId);
245 __ b(&ok, EQ); 247 __ b(&ok, EQ);
246 __ Stop("Not coming from Dart code."); 248 __ Stop("Not coming from Dart code.");
247 __ Bind(&ok); 249 __ Bind(&ok);
248 } 250 }
249 #endif 251 #endif
250 252
251 // Mark that the isolate is executing Native code. 253 // Mark that the isolate is executing Native code.
252 __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset()); 254 __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset());
253 255
254 // Reserve space for the native arguments structure passed on the stack (the 256 // Reserve space for the native arguments structure passed on the stack (the
255 // outgoing pointer parameter to the native arguments structure is passed in 257 // outgoing pointer parameter to the native arguments structure is passed in
256 // R0) and align frame before entering the C++ world. 258 // R0) and align frame before entering the C++ world.
257 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); 259 __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
258 260
259 // Initialize NativeArguments structure and call native function. 261 // Initialize NativeArguments structure and call native function.
260 // Registers R0, R1, R2, and R3 are used. 262 // Registers R0, R1, R2, and R3 are used.
261 263
262 ASSERT(thread_offset == 0 * kWordSize); 264 ASSERT(thread_offset == 0 * kWordSize);
263 // Set thread in NativeArgs. 265 // Set thread in NativeArgs.
264 __ mov(R0, Operand(THR)); 266 __ mov(R0, Operand(THR));
265 267
266 // There are no native calls to closures, so we do not need to set the tag 268 // There are no native calls to closures, so we do not need to set the tag
267 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. 269 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
268 ASSERT(argc_tag_offset == 1 * kWordSize); 270 ASSERT(argc_tag_offset == 1 * kWordSize);
269 // Set argc in NativeArguments: R1 already contains argc. 271 // Set argc in NativeArguments: R1 already contains argc.
270 272
271 ASSERT(argv_offset == 2 * kWordSize); 273 ASSERT(argv_offset == 2 * kWordSize);
272 // Set argv in NativeArguments: R2 already contains argv. 274 // Set argv in NativeArguments: R2 already contains argv.
273 275
274 ASSERT(retval_offset == 3 * kWordSize); 276 ASSERT(retval_offset == 3 * kWordSize);
275 __ add(R3, FP, Operand(3 * kWordSize)); // Set retval in NativeArgs. 277 // Set retval in NativeArgs.
278 __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize));
276 279
277 // Passing the structure by value as in runtime calls would require changing 280 // Passing the structure by value as in runtime calls would require changing
278 // Dart API for native functions. 281 // Dart API for native functions.
279 // For now, space is reserved on the stack and we pass a pointer to it. 282 // For now, space is reserved on the stack and we pass a pointer to it.
280 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3)); 283 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
281 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments. 284 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments.
282 285
283 // Call native function or redirection via simulator. 286 // Call native function or redirection via simulator.
284 __ blx(R5); 287 __ blx(R5);
285 288
286 // Mark that the isolate is executing Dart code. 289 // Mark that the isolate is executing Dart code.
287 __ LoadImmediate(R2, VMTag::kDartTagId); 290 __ LoadImmediate(R2, VMTag::kDartTagId);
288 __ StoreToOffset(kWord, R2, R9, Isolate::vm_tag_offset()); 291 __ StoreToOffset(kWord, R2, R7, Isolate::vm_tag_offset());
289 292
290 // Reset exit frame information in Isolate structure. 293 // Reset exit frame information in Isolate structure.
291 __ LoadImmediate(R2, 0); 294 __ LoadImmediate(R2, 0);
292 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); 295 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
293 296
294 __ LeaveStubFrame(); 297 __ LeaveStubFrame();
295 __ Ret(); 298 __ Ret();
296 } 299 }
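All three call stubs above wrap the foreign call in the same enter/exit protocol. A minimal C++ sketch of that protocol, assuming hypothetical Thread/Isolate structs whose fields mirror the offsets used in the assembly (a sketch, not VM source):

  #include <cstdint>

  struct IsolateSketch { uintptr_t vm_tag; };
  struct ThreadSketch  { uintptr_t top_exit_frame_info; IsolateSketch* isolate; };

  constexpr uintptr_t kDartTagId = 1;  // stand-in for VMTag::kDartTagId

  template <typename Call>
  void CallOutsideDart(ThreadSketch* thread, uintptr_t exit_fp,
                       uintptr_t target_tag, Call call) {
    thread->top_exit_frame_info = exit_fp;  // enable stack walking
    thread->isolate->vm_tag = target_tag;   // mark VM/native execution
    call();                                 // runtime entry or native wrapper
    thread->isolate->vm_tag = kDartTagId;   // back to executing Dart code
    thread->top_exit_frame_info = 0;        // reset exit frame info
  }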
297 300
298 301
299 // Input parameters: 302 // Input parameters:
300 // R4: arguments descriptor array. 303 // R4: arguments descriptor array.
301 void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) { 304 void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
302 // Create a stub frame as we are pushing some objects on the stack before 305 // Create a stub frame as we are pushing some objects on the stack before
303 // calling into the runtime. 306 // calling into the runtime.
304 __ EnterStubFrame(); 307 __ EnterStubFrame();
305 // Setup space on stack for return value and preserve arguments descriptor. 308 // Setup space on stack for return value and preserve arguments descriptor.
306 __ LoadImmediate(R0, reinterpret_cast<intptr_t>(Object::null())); 309 __ LoadImmediate(R0, reinterpret_cast<intptr_t>(Object::null()));
307 __ PushList((1 << R0) | (1 << R4)); 310 __ PushList((1 << R0) | (1 << R4));
308 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0); 311 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
309 // Get Code object result and restore arguments descriptor array. 312 // Get Code object result and restore arguments descriptor array.
310 __ PopList((1 << R0) | (1 << R4)); 313 __ PopList((1 << R0) | (1 << R4));
311 // Remove the stub frame. 314 // Remove the stub frame.
312 __ LeaveStubFrame(); 315 __ LeaveStubFrame();
313 // Jump to the dart function. 316 // Jump to the dart function.
317 __ mov(CODE_REG, Operand(R0));
314 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); 318 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
315 __ bx(R0); 319 __ bx(R0);
316 } 320 }
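The three-instruction tail call that ends this stub recurs in the two Fix*Target stubs below: under the new calling convention the callee expects its own Code object in CODE_REG before the jump. A hypothetical helper capturing the idiom (illustration only, assuming this file's `__ assembler->` shorthand; not part of the CL):

  static void EmitTailCallToCode(Assembler* assembler, Register code) {
    __ mov(CODE_REG, Operand(code));  // hand the callee its own Code object
    __ ldr(code, FieldAddress(code, Code::entry_point_offset()));
    __ bx(code);                      // jump without setting up a return address
  }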
317 321
318 322
319 // Called from a static call only when an invalid code has been entered 323 // Called from a static call only when an invalid code has been entered
320 // (invalid because its function was optimized or deoptimized). 324 // (invalid because its function was optimized or deoptimized).
321 // R4: arguments descriptor array. 325 // R4: arguments descriptor array.
322 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) { 326 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
327 // Load code pointer to this stub from the thread:
328 // The one that is passed in, is not correct - it points to the code object
329 // that needs to be replaced.
330 __ ldr(CODE_REG, Address(THR, Thread::fix_callers_target_code_offset()));
323 // Create a stub frame as we are pushing some objects on the stack before 331 // Create a stub frame as we are pushing some objects on the stack before
324 // calling into the runtime. 332 // calling into the runtime.
325 __ EnterStubFrame(); 333 __ EnterStubFrame();
326 // Setup space on stack for return value and preserve arguments descriptor. 334 // Setup space on stack for return value and preserve arguments descriptor.
327 __ LoadImmediate(R0, reinterpret_cast<intptr_t>(Object::null())); 335 __ LoadImmediate(R0, reinterpret_cast<intptr_t>(Object::null()));
328 __ PushList((1 << R0) | (1 << R4)); 336 __ PushList((1 << R0) | (1 << R4));
329 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0); 337 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
330 // Get Code object result and restore arguments descriptor array. 338 // Get Code object result and restore arguments descriptor array.
331 __ PopList((1 << R0) | (1 << R4)); 339 __ PopList((1 << R0) | (1 << R4));
332 // Remove the stub frame. 340 // Remove the stub frame.
333 __ LeaveStubFrame(); 341 __ LeaveStubFrame();
334 // Jump to the dart function. 342 // Jump to the dart function.
343 __ mov(CODE_REG, Operand(R0));
335 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); 344 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
336 __ bx(R0); 345 __ bx(R0);
337 } 346 }
338 347
339 348
340 // Called from object allocate instruction when the allocation stub has been 349 // Called from object allocate instruction when the allocation stub has been
341 // disabled. 350 // disabled.
342 void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) { 351 void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
352 // Load code pointer to this stub from the thread:
353 // The one that is passed in, is not correct - it points to the code object
354 // that needs to be replaced.
355 __ ldr(CODE_REG, Address(THR, Thread::fix_allocation_stub_code_offset()));
343 __ EnterStubFrame(); 356 __ EnterStubFrame();
344 // Setup space on stack for return value. 357 // Setup space on stack for return value.
345 __ LoadImmediate(R0, reinterpret_cast<intptr_t>(Object::null())); 358 __ LoadImmediate(R0, reinterpret_cast<intptr_t>(Object::null()));
346 __ Push(R0); 359 __ Push(R0);
347 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0); 360 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
348 // Get Code object result. 361 // Get Code object result.
349 __ Pop(R0); 362 __ Pop(R0);
350 // Remove the stub frame. 363 // Remove the stub frame.
351 __ LeaveStubFrame(); 364 __ LeaveStubFrame();
352 // Jump to the dart function. 365 // Jump to the dart function.
366 __ mov(CODE_REG, Operand(R0));
353 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); 367 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
354 __ bx(R0); 368 __ bx(R0);
355 } 369 }
356 370
357 371
358 // Input parameters: 372 // Input parameters:
359 // R2: smi-tagged argument count, may be zero. 373 // R2: smi-tagged argument count, may be zero.
360 // FP[kParamEndSlotFromFp + 1]: last argument. 374 // FP[kParamEndSlotFromFp + 1]: last argument.
361 static void PushArgumentsArray(Assembler* assembler) { 375 static void PushArgumentsArray(Assembler* assembler) {
362 // Allocate array to store arguments of caller. 376 // Allocate array to store arguments of caller.
(...skipping 33 matching lines...)
396 // - Materialize objects that require allocation (e.g. Double instances). 410 // - Materialize objects that require allocation (e.g. Double instances).
397 // GC can occur only after frame is fully rewritten. 411 // GC can occur only after frame is fully rewritten.
398 // Stack after EnterFrame(...) below: 412 // Stack after EnterFrame(...) below:
399 // +------------------+ 413 // +------------------+
400 // | Saved PP | <- TOS 414 // | Saved PP | <- TOS
401 // +------------------+ 415 // +------------------+
402 // | Saved FP | <- FP of stub 416 // | Saved FP | <- FP of stub
403 // +------------------+ 417 // +------------------+
404 // | Saved LR | (deoptimization point) 418 // | Saved LR | (deoptimization point)
405 // +------------------+ 419 // +------------------+
406 // | PC marker | 420 // | pc marker |
421 // +------------------+
422 // | Saved CODE_REG |
407 // +------------------+ 423 // +------------------+
408 // | ... | <- SP of optimized frame 424 // | ... | <- SP of optimized frame
409 // 425 //
410 // Parts of the code cannot GC, part of the code can GC. 426 // Parts of the code cannot GC, part of the code can GC.
411 static void GenerateDeoptimizationSequence(Assembler* assembler, 427 static void GenerateDeoptimizationSequence(Assembler* assembler,
412 bool preserve_result) { 428 DeoptStubKind kind) {
413 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there 429 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
414 // is no need to set the correct PC marker or load PP, since they get patched. 430 // is no need to set the correct PC marker or load PP, since they get patched.
415 431
416 // IP has the potentially live LR value. LR was clobbered by the call with 432 // IP has the potentially live LR value. LR was clobbered by the call with
417 // the return address, so move it into IP to set up the Dart frame. 433 // the return address, so move it into IP to set up the Dart frame.
418 __ eor(IP, IP, Operand(LR)); 434 __ eor(IP, IP, Operand(LR));
419 __ eor(LR, IP, Operand(LR)); 435 __ eor(LR, IP, Operand(LR));
420 __ eor(IP, IP, Operand(LR)); 436 __ eor(IP, IP, Operand(LR));
421 437
422 // Set up the frame manually. We can't use EnterFrame because we can't 438 // Set up the frame manually with return address now stored in IP.
423 // clobber LR (or any other register) with 0, yet. 439 __ EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << IP), 0);
424 __ sub(SP, SP, Operand(kWordSize)); // Make room for PC marker of 0.
425 __ Push(IP); // Push return address.
426 __ Push(FP);
427 __ mov(FP, Operand(SP));
428 __ Push(PP);
429
430 __ LoadPoolPointer(); 440 __ LoadPoolPointer();
431 441
432 // Now that IP holding the return address has been written to the stack,
433 // we can clobber it with 0 to write the null PC marker.
434 __ mov(IP, Operand(0));
435 __ str(IP, Address(SP, +3 * kWordSize));
436
437 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry 442 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
438 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls. 443 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
439 const intptr_t saved_result_slot_from_fp = 444 const intptr_t saved_result_slot_from_fp =
440 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - R0); 445 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - R0);
441 // Result in R0 is preserved as part of pushing all registers below. 446 // Result in R0 is preserved as part of pushing all registers below.
442 447
443 // Push registers in their enumeration order: lowest register number at 448 // Push registers in their enumeration order: lowest register number at
444 // lowest address. 449 // lowest address.
445 __ PushList(kAllCpuRegistersList); 450 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
451 if (i == CODE_REG) {
452 // Value of IP does not matter, but we can overwrite it in any case
453 // since it is already pushed before CODE_REG.
454 __ ldr(IP, Address(FP, 2 * kWordSize));
455 __ Push(IP);
456 } else {
457 __ Push(static_cast<Register>(i));
458 }
459 }
460 // TODO(fschneider): Use PushList, store saved value of CODE_REG separately.
461 // __ PushList(kAllCpuRegistersList);
446 462
447 if (TargetCPUFeatures::vfp_supported()) { 463 if (TargetCPUFeatures::vfp_supported()) {
448 ASSERT(kFpuRegisterSize == 4 * kWordSize); 464 ASSERT(kFpuRegisterSize == 4 * kWordSize);
449 if (kNumberOfDRegisters > 16) { 465 if (kNumberOfDRegisters > 16) {
450 __ vstmd(DB_W, SP, D16, kNumberOfDRegisters - 16); 466 __ vstmd(DB_W, SP, D16, kNumberOfDRegisters - 16);
451 __ vstmd(DB_W, SP, D0, 16); 467 __ vstmd(DB_W, SP, D0, 16);
452 } else { 468 } else {
453 __ vstmd(DB_W, SP, D0, kNumberOfDRegisters); 469 __ vstmd(DB_W, SP, D0, kNumberOfDRegisters);
454 } 470 }
455 } else { 471 } else {
456 __ AddImmediate(SP, SP, -kNumberOfFpuRegisters * kFpuRegisterSize); 472 __ AddImmediate(SP, SP, -kNumberOfFpuRegisters * kFpuRegisterSize);
457 } 473 }
458 474
459 __ mov(R0, Operand(SP)); // Pass address of saved registers block. 475 __ mov(R0, Operand(SP)); // Pass address of saved registers block.
476 __ mov(R1, Operand(kind == kLazyDeopt ? 1 : 0));
460 __ ReserveAlignedFrameSpace(0); 477 __ ReserveAlignedFrameSpace(0);
461 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 1); 478 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
462 // Result (R0) is stack-size (FP - SP) in bytes. 479 // Result (R0) is stack-size (FP - SP) in bytes.
463 480
481 const bool preserve_result = (kind == kLazyDeopt);
464 if (preserve_result) { 482 if (preserve_result) {
465 // Restore result into R1 temporarily. 483 // Restore result into R1 temporarily.
466 __ ldr(R1, Address(FP, saved_result_slot_from_fp * kWordSize)); 484 __ ldr(R1, Address(FP, saved_result_slot_from_fp * kWordSize));
467 } 485 }
468 486
487 __ RestoreCodePointer();
469 __ LeaveDartFrame(); 488 __ LeaveDartFrame();
470 __ sub(SP, FP, Operand(R0)); 489 __ sub(SP, FP, Operand(R0));
471 490
472 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there 491 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
473 // is no need to set the correct PC marker or load PP, since they get patched. 492 // is no need to set the correct PC marker or load PP, since they get patched.
474 __ EnterStubFrame(); 493 __ EnterStubFrame();
475 __ mov(R0, Operand(FP)); // Get last FP address. 494 __ mov(R0, Operand(FP)); // Get last FP address.
476 if (preserve_result) { 495 if (preserve_result) {
477 __ Push(R1); // Preserve result as first local. 496 __ Push(R1); // Preserve result as first local.
478 } 497 }
479 __ ReserveAlignedFrameSpace(0); 498 __ ReserveAlignedFrameSpace(0);
480 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in R0. 499 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in R0.
481 if (preserve_result) { 500 if (preserve_result) {
482 // Restore result into R1. 501 // Restore result into R1.
483 __ ldr(R1, Address(FP, kFirstLocalSlotFromFp * kWordSize)); 502 __ ldr(R1, Address(FP, kFirstLocalSlotFromFp * kWordSize));
484 } 503 }
485 // Code above cannot cause GC. 504 // Code above cannot cause GC.
505 __ RestoreCodePointer();
486 __ LeaveStubFrame(); 506 __ LeaveStubFrame();
487 507
488 // Frame is fully rewritten at this point and it is safe to perform a GC. 508 // Frame is fully rewritten at this point and it is safe to perform a GC.
489 // Materialize any objects that were deferred by FillFrame because they 509 // Materialize any objects that were deferred by FillFrame because they
490 // require allocation. 510 // require allocation.
491 // Enter stub frame with loading PP. The caller's PP is not materialized yet. 511 // Enter stub frame with loading PP. The caller's PP is not materialized yet.
492 __ EnterStubFrame(); 512 __ EnterStubFrame();
493 if (preserve_result) { 513 if (preserve_result) {
494 __ Push(R1); // Preserve result, it will be GC-d here. 514 __ Push(R1); // Preserve result, it will be GC-d here.
495 } 515 }
496 __ PushObject(Smi::ZoneHandle()); // Space for the result. 516 __ PushObject(Smi::ZoneHandle()); // Space for the result.
497 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0); 517 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
498 // Result tells stub how many bytes to remove from the expression stack 518 // Result tells stub how many bytes to remove from the expression stack
499 // of the bottom-most frame. They were used as materialization arguments. 519 // of the bottom-most frame. They were used as materialization arguments.
500 __ Pop(R1); 520 __ Pop(R1);
501 if (preserve_result) { 521 if (preserve_result) {
502 __ Pop(R0); // Restore result. 522 __ Pop(R0); // Restore result.
503 } 523 }
504 __ LeaveStubFrame(); 524 __ LeaveStubFrame();
505 // Remove materialization arguments. 525 // Remove materialization arguments.
506 __ add(SP, SP, Operand(R1, ASR, kSmiTagSize)); 526 __ add(SP, SP, Operand(R1, ASR, kSmiTagSize));
507 __ Ret(); 527 __ Ret();
508 } 528 }
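The three eor instructions at the top of this sequence are the classic XOR swap: they exchange the live values of IP and LR without a scratch register, which matters here because every register may still hold state the deoptimizer needs. The same trick in plain C++ (a sketch, not VM code):

  #include <cstdint>

  inline void XorSwap(uintptr_t& a, uintptr_t& b) {
    a ^= b;  // a now holds a ^ b
    b ^= a;  // b = b ^ (a ^ b) == original a
    a ^= b;  // a = (a ^ b) ^ original a == original b
  }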
509 529
510 530
511 void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) { 531 void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
512 // Correct return address to point just after the call that is being 532 // Correct return address to point just after the call that is being
513 // deoptimized. 533 // deoptimized.
514 __ AddImmediate(LR, -CallPattern::LengthInBytes()); 534 __ AddImmediate(LR, -CallPattern::DeoptCallPatternLengthInBytes());
515 GenerateDeoptimizationSequence(assembler, true); // Preserve R0. 535 // Push zap value instead of CODE_REG for lazy deopt.
536 __ LoadImmediate(IP, 0xf1f1f1f1);
537 __ Push(IP);
538 GenerateDeoptimizationSequence(assembler, kLazyDeopt);
516 } 539 }
517 540
518 541
519 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { 542 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
520 GenerateDeoptimizationSequence(assembler, false); // Don't preserve R0. 543 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
521 } 544 }
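This patch replaces the boolean preserve_result parameter with a DeoptStubKind: the kind both selects whether R0 survives and is forwarded to the runtime in R1. A sketch of the enum as used above (its actual declaration lives elsewhere in the CL):

  enum DeoptStubKind {
    kEagerDeopt,  // GenerateDeoptimizeStub: R0 not preserved, passes R1 = 0
    kLazyDeopt    // GenerateDeoptimizeLazyStub: R0 preserved, passes R1 = 1
  };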
522 545
523 546
524 static void GenerateDispatcherCode(Assembler* assembler, 547 static void GenerateDispatcherCode(Assembler* assembler,
525 Label* call_target_function) { 548 Label* call_target_function) {
526 __ Comment("NoSuchMethodDispatch"); 549 __ Comment("NoSuchMethodDispatch");
527 // When lazily generated invocation dispatchers are disabled, the 550 // When lazily generated invocation dispatchers are disabled, the
528 // miss-handler may return null. 551 // miss-handler may return null.
529 __ CompareObject(R0, Object::null_object()); 552 __ CompareObject(R0, Object::null_object());
530 __ b(call_target_function, NE); 553 __ b(call_target_function, NE);
(...skipping 35 matching lines...)
566 __ LoadImmediate(IP, reinterpret_cast<intptr_t>(Object::null())); 589 __ LoadImmediate(IP, reinterpret_cast<intptr_t>(Object::null()));
567 __ PushList((1 << R4) | (1 << R5) | (1 << R6) | (1 << IP)); 590 __ PushList((1 << R4) | (1 << R5) | (1 << R6) | (1 << IP));
568 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3); 591 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3);
569 // Remove arguments. 592 // Remove arguments.
570 __ Drop(3); 593 __ Drop(3);
571 __ Pop(R0); // Get result into R0 (target function). 594 __ Pop(R0); // Get result into R0 (target function).
572 595
573 // Restore IC data and arguments descriptor. 596 // Restore IC data and arguments descriptor.
574 __ PopList((1 << R4) | (1 << R5)); 597 __ PopList((1 << R4) | (1 << R5));
575 598
599 __ RestoreCodePointer();
576 __ LeaveStubFrame(); 600 __ LeaveStubFrame();
577 601
578 if (!FLAG_lazy_dispatchers) { 602 if (!FLAG_lazy_dispatchers) {
579 Label call_target_function; 603 Label call_target_function;
580 GenerateDispatcherCode(assembler, &call_target_function); 604 GenerateDispatcherCode(assembler, &call_target_function);
581 __ Bind(&call_target_function); 605 __ Bind(&call_target_function);
582 } 606 }
583 607
584 // Tail-call to target function. 608 // Tail-call to target function.
609 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
585 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); 610 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
586 __ bx(R2); 611 __ bx(R2);
587 } 612 }
588 613
589 614
590 // Called for inline allocation of arrays. 615 // Called for inline allocation of arrays.
591 // Input parameters: 616 // Input parameters:
592 // LR: return address. 617 // LR: return address.
593 // R1: array element type (either NULL or an instantiated type). 618 // R1: array element type (either NULL or an instantiated type).
594 // R2: array length as Smi (must be preserved). 619 // R2: array length as Smi (must be preserved).
(...skipping 18 matching lines...)
613 const intptr_t max_len = 638 const intptr_t max_len =
614 reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements)); 639 reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements));
615 __ CompareImmediate(R3, max_len); 640 __ CompareImmediate(R3, max_len);
616 __ b(&slow_case, GT); 641 __ b(&slow_case, GT);
617 642
618 const intptr_t cid = kArrayCid; 643 const intptr_t cid = kArrayCid;
619 __ MaybeTraceAllocation(cid, R4, &slow_case, 644 __ MaybeTraceAllocation(cid, R4, &slow_case,
620 /* inline_isolate = */ false); 645 /* inline_isolate = */ false);
621 646
622 const intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1; 647 const intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1;
623 __ LoadImmediate(R9, fixed_size); 648 __ LoadImmediate(R5, fixed_size);
624 __ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi. 649 __ add(R5, R5, Operand(R3, LSL, 1)); // R3 is a Smi.
625 ASSERT(kSmiTagShift == 1); 650 ASSERT(kSmiTagShift == 1);
626 __ bic(R9, R9, Operand(kObjectAlignment - 1)); 651 __ bic(R5, R5, Operand(kObjectAlignment - 1));
627 652
628 // R9: Allocation size. 653 // R5: Allocation size.
629 Heap::Space space = Heap::SpaceForAllocation(cid); 654 Heap::Space space = Heap::SpaceForAllocation(cid);
630 __ LoadIsolate(R6); 655 __ LoadIsolate(R6);
631 __ ldr(R6, Address(R6, Isolate::heap_offset())); 656 __ ldr(R6, Address(R6, Isolate::heap_offset()));
632 // Potential new object start. 657 // Potential new object start.
633 __ ldr(R0, Address(R6, Heap::TopOffset(space))); 658 __ ldr(R0, Address(R6, Heap::TopOffset(space)));
634 __ adds(R7, R0, Operand(R9)); // Potential next object start. 659 __ adds(R7, R0, Operand(R5)); // Potential next object start.
635 __ b(&slow_case, CS); // Branch if unsigned overflow. 660 __ b(&slow_case, CS); // Branch if unsigned overflow.
636 661
637 // Check if the allocation fits into the remaining space. 662 // Check if the allocation fits into the remaining space.
638 // R0: potential new object start. 663 // R0: potential new object start.
639 // R7: potential next object start. 664 // R7: potential next object start.
640 // R9: allocation size. 665 // R5: allocation size.
641 __ ldr(R3, Address(R6, Heap::EndOffset(space))); 666 __ ldr(R3, Address(R6, Heap::EndOffset(space)));
642 __ cmp(R7, Operand(R3)); 667 __ cmp(R7, Operand(R3));
643 __ b(&slow_case, CS); 668 __ b(&slow_case, CS);
644 669
645 // Successfully allocated the object(s), now update top to point to 670 // Successfully allocated the object(s), now update top to point to
646 // next object start and initialize the object. 671 // next object start and initialize the object.
647 __ LoadAllocationStatsAddress(R3, cid, /* inline_isolate = */ false); 672 __ LoadAllocationStatsAddress(R3, cid, /* inline_isolate = */ false);
648 __ str(R7, Address(R6, Heap::TopOffset(space))); 673 __ str(R7, Address(R6, Heap::TopOffset(space)));
649 __ add(R0, R0, Operand(kHeapObjectTag)); 674 __ add(R0, R0, Operand(kHeapObjectTag));
650 675
651 // Initialize the tags. 676 // Initialize the tags.
652 // R0: new object start as a tagged pointer. 677 // R0: new object start as a tagged pointer.
653 // R3: allocation stats address. 678 // R3: allocation stats address.
654 // R7: new object end address. 679 // R7: new object end address.
655 // R9: allocation size. 680 // R5: allocation size.
656 { 681 {
657 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; 682 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
658 683
659 __ CompareImmediate(R9, RawObject::SizeTag::kMaxSizeTag); 684 __ CompareImmediate(R5, RawObject::SizeTag::kMaxSizeTag);
660 __ mov(R6, Operand(R9, LSL, shift), LS); 685 __ mov(R6, Operand(R5, LSL, shift), LS);
661 __ mov(R6, Operand(0), HI); 686 __ mov(R6, Operand(0), HI);
662 687
663 // Get the class index and insert it into the tags. 688 // Get the class index and insert it into the tags.
664 // R6: size and bit tags. 689 // R6: size and bit tags.
665 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); 690 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
666 __ orr(R6, R6, Operand(TMP)); 691 __ orr(R6, R6, Operand(TMP));
667 __ str(R6, FieldAddress(R0, Array::tags_offset())); // Store tags. 692 __ str(R6, FieldAddress(R0, Array::tags_offset())); // Store tags.
668 } 693 }
669 694
670 // R0: new object start as a tagged pointer. 695 // R0: new object start as a tagged pointer.
671 // R7: new object end address. 696 // R7: new object end address.
672 // Store the type argument field. 697 // Store the type argument field.
673 __ InitializeFieldNoBarrier(R0, 698 __ InitializeFieldNoBarrier(R0,
674 FieldAddress(R0, Array::type_arguments_offset()), 699 FieldAddress(R0, Array::type_arguments_offset()),
675 R1); 700 R1);
676 701
677 // Set the length field. 702 // Set the length field.
678 __ InitializeFieldNoBarrier(R0, 703 __ InitializeFieldNoBarrier(R0,
679 FieldAddress(R0, Array::length_offset()), 704 FieldAddress(R0, Array::length_offset()),
680 R2); 705 R2);
681 706
682 // Initialize all array elements to raw_null. 707 // Initialize all array elements to raw_null.
683 // R0: new object start as a tagged pointer. 708 // R0: new object start as a tagged pointer.
684 // R3: allocation stats address. 709 // R3: allocation stats address.
685 // R4, R5: null 710 // R4, R5: null
686 // R6: iterator which initially points to the start of the variable 711 // R6: iterator which initially points to the start of the variable
687 // data area to be initialized. 712 // data area to be initialized.
688 // R7: new object end address. 713 // R7: new object end address.
689 // R9: allocation size. 714 // R5: allocation size.
715 __ IncrementAllocationStatsWithSize(R3, R5, space);
690 716
691 __ LoadImmediate(R4, reinterpret_cast<intptr_t>(Object::null())); 717 __ LoadImmediate(R4, reinterpret_cast<intptr_t>(Object::null()));
692 __ mov(R5, Operand(R4)); 718 __ mov(R5, Operand(R4));
693 __ AddImmediate(R6, R0, sizeof(RawArray) - kHeapObjectTag); 719 __ AddImmediate(R6, R0, sizeof(RawArray) - kHeapObjectTag);
694 __ InitializeFieldsNoBarrier(R0, R6, R7, R4, R5); 720 __ InitializeFieldsNoBarrier(R0, R6, R7, R4, R5);
695 __ IncrementAllocationStatsWithSize(R3, R9, space);
696 __ Ret(); // Returns the newly allocated object in R0. 721 __ Ret(); // Returns the newly allocated object in R0.
697 // Unable to allocate the array using the fast inline code, just call 722 // Unable to allocate the array using the fast inline code, just call
698 // into the runtime. 723 // into the runtime.
699 __ Bind(&slow_case); 724 __ Bind(&slow_case);
700 725
701 // Create a stub frame as we are pushing some objects on the stack before 726 // Create a stub frame as we are pushing some objects on the stack before
702 // calling into the runtime. 727 // calling into the runtime.
703 __ EnterStubFrame(); 728 __ EnterStubFrame();
704 __ LoadImmediate(IP, reinterpret_cast<intptr_t>(Object::null())); 729 __ LoadImmediate(IP, reinterpret_cast<intptr_t>(Object::null()));
705 // Setup space on stack for return value. 730 // Setup space on stack for return value.
706 // Push array length as Smi and element type. 731 // Push array length as Smi and element type.
707 __ PushList((1 << R1) | (1 << R2) | (1 << IP)); 732 __ PushList((1 << R1) | (1 << R2) | (1 << IP));
708 __ CallRuntime(kAllocateArrayRuntimeEntry, 2); 733 __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
709 // Pop arguments; result is popped in IP. 734 // Pop arguments; result is popped in IP.
710 __ PopList((1 << R1) | (1 << R2) | (1 << IP)); // R2 is restored. 735 __ PopList((1 << R1) | (1 << R2) | (1 << IP)); // R2 is restored.
711 __ mov(R0, Operand(IP)); 736 __ mov(R0, Operand(IP));
712 __ LeaveStubFrame(); 737 __ LeaveStubFrame();
713 __ Ret(); 738 __ Ret();
714 } 739 }
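The fast-path size computation above rounds the array size up to the object alignment in two steps: add kObjectAlignment - 1 up front, then clear the low bits with bic. A C++ sketch of the arithmetic, assuming 32-bit words and kSmiTagShift == 1, so a tagged Smi is twice the untagged length:

  #include <cstdint>

  uint32_t ArrayAllocationSize(uint32_t smi_len,         // Smi-tagged length (2 * len)
                               uint32_t raw_array_size,  // sizeof(RawArray)
                               uint32_t alignment) {     // kObjectAlignment, power of two
    uint32_t size = raw_array_size + alignment - 1;      // fixed_size above
    size += smi_len << 1;            // (2 * len) << 1 == len * 4 == len * kWordSize
    return size & ~(alignment - 1);  // bic(..., alignment - 1): net effect rounds up
  }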
715 740
716 741
717 // Called when invoking Dart code from C++ (VM code). 742 // Called when invoking Dart code from C++ (VM code).
718 // Input parameters: 743 // Input parameters:
719 // LR : points to return address. 744 // LR : points to return address.
720 // R0 : entrypoint of the Dart function to call. 745 // R0 : code object of the Dart function to call.
721 // R1 : arguments descriptor array. 746 // R1 : arguments descriptor array.
722 // R2 : arguments array. 747 // R2 : arguments array.
723 // R3 : current thread. 748 // R3 : current thread.
724 void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) { 749 void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
725 // Save frame pointer coming in. 750 // Save frame pointer coming in.
726 __ EnterFrame((1 << FP) | (1 << LR), 0); 751 __ EnterFrame((1 << FP) | (1 << LR), 0);
727 752
728 // Save new context and C++ ABI callee-saved registers. 753 // Save new context and C++ ABI callee-saved registers.
729 __ PushList(kAbiPreservedCpuRegs); 754 __ PushList(kAbiPreservedCpuRegs);
730 755
731 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg); 756 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg);
732 if (TargetCPUFeatures::vfp_supported()) { 757 if (TargetCPUFeatures::vfp_supported()) {
733 ASSERT(2 * kAbiPreservedFpuRegCount < 16); 758 ASSERT(2 * kAbiPreservedFpuRegCount < 16);
734 // Save FPU registers. 2 D registers per Q register. 759 // Save FPU registers. 2 D registers per Q register.
735 __ vstmd(DB_W, SP, firstd, 2 * kAbiPreservedFpuRegCount); 760 __ vstmd(DB_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
736 } else { 761 } else {
737 __ sub(SP, SP, Operand(kAbiPreservedFpuRegCount * kFpuRegisterSize)); 762 __ sub(SP, SP, Operand(kAbiPreservedFpuRegCount * kFpuRegisterSize));
738 } 763 }
739 764
740 // We now load the pool pointer(PP) as we are about to invoke dart code and we
741 // could potentially invoke some intrinsic functions which need the PP to be
742 // set up.
743 __ LoadPoolPointer();
744
745 // Set up THR, which caches the current thread in Dart code. 765 // Set up THR, which caches the current thread in Dart code.
746 if (THR != R3) { 766 if (THR != R3) {
747 __ mov(THR, Operand(R3)); 767 __ mov(THR, Operand(R3));
748 } 768 }
749 __ LoadIsolate(R9); 769 __ LoadIsolate(R7);
750 770
751 // Save the current VMTag on the stack. 771 // Save the current VMTag on the stack.
752 __ LoadFromOffset(kWord, R5, R9, Isolate::vm_tag_offset()); 772 __ LoadFromOffset(kWord, R5, R7, Isolate::vm_tag_offset());
753 __ Push(R5); 773 __ Push(R5);
754 774
755 // Mark that the isolate is executing Dart code. 775 // Mark that the isolate is executing Dart code.
756 __ LoadImmediate(R5, VMTag::kDartTagId); 776 __ LoadImmediate(R5, VMTag::kDartTagId);
757 __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset()); 777 __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset());
758 778
759 // Save top resource and top exit frame info. Use R4-6 as temporary registers. 779 // Save top resource and top exit frame info. Use R4-6 as temporary registers.
760 // StackFrameIterator reads the top exit frame info saved in this frame. 780 // StackFrameIterator reads the top exit frame info saved in this frame.
761 __ LoadFromOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset()); 781 __ LoadFromOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset());
762 __ LoadFromOffset(kWord, R4, THR, Thread::top_resource_offset()); 782 __ LoadFromOffset(kWord, R4, THR, Thread::top_resource_offset());
763 __ LoadImmediate(R6, 0); 783 __ LoadImmediate(R6, 0);
764 __ StoreToOffset(kWord, R6, THR, Thread::top_resource_offset()); 784 __ StoreToOffset(kWord, R6, THR, Thread::top_resource_offset());
765 __ StoreToOffset(kWord, R6, THR, Thread::top_exit_frame_info_offset()); 785 __ StoreToOffset(kWord, R6, THR, Thread::top_exit_frame_info_offset());
766 786
767 // kExitLinkSlotFromEntryFp must be kept in sync with the code below. 787 // kExitLinkSlotFromEntryFp must be kept in sync with the code below.
(...skipping 21 matching lines...)
789 __ Bind(&push_arguments); 809 __ Bind(&push_arguments);
790 __ ldr(R3, Address(R2)); 810 __ ldr(R3, Address(R2));
791 __ Push(R3); 811 __ Push(R3);
792 __ AddImmediate(R2, kWordSize); 812 __ AddImmediate(R2, kWordSize);
793 __ AddImmediate(R1, 1); 813 __ AddImmediate(R1, 1);
794 __ cmp(R1, Operand(R5)); 814 __ cmp(R1, Operand(R5));
795 __ b(&push_arguments, LT); 815 __ b(&push_arguments, LT);
796 __ Bind(&done_push_arguments); 816 __ Bind(&done_push_arguments);
797 817
798 // Call the Dart code entrypoint. 818 // Call the Dart code entrypoint.
819 __ LoadImmediate(PP, 0); // GC safe value into PP.
820 __ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle));
821 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
799 __ blx(R0); // R4 is the arguments descriptor array. 822 __ blx(R0); // R4 is the arguments descriptor array.
800 823
801 // Get rid of arguments pushed on the stack. 824 // Get rid of arguments pushed on the stack.
802 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize); 825 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize);
803 826
804 __ LoadIsolate(R9); 827 __ LoadIsolate(R7);
805 // Restore the saved top exit frame info and top resource back into the 828 // Restore the saved top exit frame info and top resource back into the
806 // Isolate structure. Uses R5 as a temporary register for this. 829 // Isolate structure. Uses R5 as a temporary register for this.
807 __ Pop(R5); 830 __ Pop(R5);
808 __ StoreToOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset()); 831 __ StoreToOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset());
809 __ Pop(R5); 832 __ Pop(R5);
810 __ StoreToOffset(kWord, R5, THR, Thread::top_resource_offset()); 833 __ StoreToOffset(kWord, R5, THR, Thread::top_resource_offset());
811 834
812 // Restore the current VMTag from the stack. 835 // Restore the current VMTag from the stack.
813 __ Pop(R4); 836 __ Pop(R4);
814 __ StoreToOffset(kWord, R4, R9, Isolate::vm_tag_offset()); 837 __ StoreToOffset(kWord, R4, R7, Isolate::vm_tag_offset());
815 838
816 // Restore C++ ABI callee-saved registers. 839 // Restore C++ ABI callee-saved registers.
817 if (TargetCPUFeatures::vfp_supported()) { 840 if (TargetCPUFeatures::vfp_supported()) {
818 // Restore FPU registers. 2 D registers per Q register. 841 // Restore FPU registers. 2 D registers per Q register.
819 __ vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount); 842 __ vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
820 } else { 843 } else {
821 __ AddImmediate(SP, kAbiPreservedFpuRegCount * kFpuRegisterSize); 844 __ AddImmediate(SP, kAbiPreservedFpuRegCount * kFpuRegisterSize);
822 } 845 }
823 // Restore CPU registers. 846 // Restore CPU registers.
824 __ PopList(kAbiPreservedCpuRegs); 847 __ PopList(kAbiPreservedCpuRegs);
(...skipping 200 matching lines...)
1025 __ Ret(); 1048 __ Ret();
1026 } 1049 }
1027 1050
1028 1051
1029 // Called for inline allocation of objects. 1052 // Called for inline allocation of objects.
1030 // Input parameters: 1053 // Input parameters:
1031 // LR : return address. 1054 // LR : return address.
1032 // SP + 0 : type arguments object (only if class is parameterized). 1055 // SP + 0 : type arguments object (only if class is parameterized).
1033 // Returns patch_code_pc offset where patching code for disabling the stub 1056 // Returns patch_code_pc offset where patching code for disabling the stub
1034 // has been generated (similar to regularly generated Dart code). 1057 // has been generated (similar to regularly generated Dart code).
1035 void StubCode::GenerateAllocationStubForClass( 1058 void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
1036 Assembler* assembler, const Class& cls, 1059 const Class& cls) {
1037 uword* entry_patch_offset, uword* patch_code_pc_offset) { 1060 // Must load pool pointer before being able to patch.
1038 *entry_patch_offset = assembler->CodeSize(); 1061 Register new_pp = R7;
1062 __ LoadPoolPointer(new_pp);
1039 // The generated code is different if the class is parameterized. 1063 // The generated code is different if the class is parameterized.
1040 const bool is_cls_parameterized = cls.NumTypeArguments() > 0; 1064 const bool is_cls_parameterized = cls.NumTypeArguments() > 0;
1041 ASSERT(!is_cls_parameterized || 1065 ASSERT(!is_cls_parameterized ||
1042 (cls.type_arguments_field_offset() != Class::kNoTypeArguments)); 1066 (cls.type_arguments_field_offset() != Class::kNoTypeArguments));
1043 // kInlineInstanceSize is a constant used as a threshold for determining 1067 // kInlineInstanceSize is a constant used as a threshold for determining
1044 // when the object initialization should be done as a loop or as 1068 // when the object initialization should be done as a loop or as
1045 // straight line code. 1069 // straight line code.
1046 const int kInlineInstanceSize = 12; 1070 const int kInlineInstanceSize = 12;
1047 const intptr_t instance_size = cls.instance_size(); 1071 const intptr_t instance_size = cls.instance_size();
1048 ASSERT(instance_size > 0); 1072 ASSERT(instance_size > 0);
(...skipping 105 matching lines...)
1154 // Push null type arguments. 1178 // Push null type arguments.
1155 __ Push(R2); 1179 __ Push(R2);
1156 } 1180 }
1157 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object. 1181 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object.
1158 __ Drop(2); // Pop arguments. 1182 __ Drop(2); // Pop arguments.
1159 __ Pop(R0); // Pop result (newly allocated object). 1183 __ Pop(R0); // Pop result (newly allocated object).
1160 // R0: new object 1184 // R0: new object
1161 // Restore the frame pointer. 1185 // Restore the frame pointer.
1162 __ LeaveStubFrame(); 1186 __ LeaveStubFrame();
1163 __ Ret(); 1187 __ Ret();
1164 *patch_code_pc_offset = assembler->CodeSize();
1165 __ BranchPatchable(*StubCode::FixAllocationStubTarget_entry());
1166 } 1188 }
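The allocation fast path itself sits in the 105 skipped lines above; only the slow path (the kAllocateObjectRuntimeEntry call) is visible in this hunk. As a rough, hedged model of what such a fast path computes — a bump-pointer allocation with a runtime fallback — consider the sketch below. The Heap struct and TryAllocate name are illustrative stand-ins, not VM API.

#include <cstdint>

// Hypothetical stand-in for the VM's new-space bounds.
struct Heap {
  uintptr_t top;  // next free address
  uintptr_t end;  // end of the allocatable region
};

// Returns the new object's (untagged) address, or 0 to request the
// runtime slow path seen above (kAllocateObjectRuntimeEntry).
uintptr_t TryAllocate(Heap* heap, uintptr_t instance_size) {
  uintptr_t result = heap->top;
  uintptr_t new_top = result + instance_size;
  if (new_top > heap->end) {
    return 0;  // out of space: fall back to the runtime allocator
  }
  heap->top = new_top;  // bump the pointer; header/tagging happen next
  return result;
}

The point of generating a stub per class is that instance_size is a compile-time constant there, so the common case never calls into the C++ runtime.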
1167 1189
1168 1190
1169 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function 1191 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
1170 // from the entry code of a dart function after an error in passed argument 1192 // from the entry code of a dart function after an error in passed argument
1171 // name or number is detected. 1193 // name or number is detected.
1172 // Input parameters: 1194 // Input parameters:
1173 // LR : return address. 1195 // LR : return address.
1174 // SP : address of last argument. 1196 // SP : address of last argument.
1175 // R4: arguments descriptor array. 1197 // R4: arguments descriptor array.
(...skipping 134 matching lines...)
1310 // - Check if 'num_args' (including receiver) match any IC data group. 1332 // - Check if 'num_args' (including receiver) match any IC data group.
1311 // - Match found -> jump to target. 1333 // - Match found -> jump to target.
1312 // - Match not found -> jump to IC miss. 1334 // - Match not found -> jump to IC miss.
1313 void StubCode::GenerateNArgsCheckInlineCacheStub( 1335 void StubCode::GenerateNArgsCheckInlineCacheStub(
1314 Assembler* assembler, 1336 Assembler* assembler,
1315 intptr_t num_args, 1337 intptr_t num_args,
1316 const RuntimeEntry& handle_ic_miss, 1338 const RuntimeEntry& handle_ic_miss,
1317 Token::Kind kind, 1339 Token::Kind kind,
1318 RangeCollectionMode range_collection_mode, 1340 RangeCollectionMode range_collection_mode,
1319 bool optimized) { 1341 bool optimized) {
1342 __ CheckCodePointer();
1320 ASSERT(num_args > 0); 1343 ASSERT(num_args > 0);
1321 #if defined(DEBUG) 1344 #if defined(DEBUG)
1322 { Label ok; 1345 { Label ok;
1323 // Check that the IC data array has NumArgsTested() == num_args. 1346 // Check that the IC data array has NumArgsTested() == num_args.
1324 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. 1347 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
1325 __ ldr(R6, FieldAddress(R5, ICData::state_bits_offset())); 1348 __ ldr(R6, FieldAddress(R5, ICData::state_bits_offset()));
1326 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. 1349 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
1327 __ and_(R6, R6, Operand(ICData::NumArgsTestedMask())); 1350 __ and_(R6, R6, Operand(ICData::NumArgsTestedMask()));
1328 __ CompareImmediate(R6, num_args); 1351 __ CompareImmediate(R6, num_args);
1329 __ b(&ok, EQ); 1352 __ b(&ok, EQ);
(...skipping 104 matching lines...)
1434 __ Push(IP); 1457 __ Push(IP);
1435 } 1458 }
1436 // Pass IC data object. 1459 // Pass IC data object.
1437 __ Push(R5); 1460 __ Push(R5);
1438 __ CallRuntime(handle_ic_miss, num_args + 1); 1461 __ CallRuntime(handle_ic_miss, num_args + 1);
1439 // Remove the call arguments pushed earlier, including the IC data object. 1462 // Remove the call arguments pushed earlier, including the IC data object.
1440 __ Drop(num_args + 1); 1463 __ Drop(num_args + 1);
1441 // Pop returned function object into R0. 1464 // Pop returned function object into R0.
1442 // Restore arguments descriptor array and IC data array. 1465 // Restore arguments descriptor array and IC data array.
1443 __ PopList((1 << R0) | (1 << R4) | (1 << R5)); 1466 __ PopList((1 << R0) | (1 << R4) | (1 << R5));
1467 if (range_collection_mode == kCollectRanges) {
1468 __ RestoreCodePointer();
1469 }
1444 __ LeaveStubFrame(); 1470 __ LeaveStubFrame();
1445 Label call_target_function; 1471 Label call_target_function;
1446 if (!FLAG_lazy_dispatchers) { 1472 if (!FLAG_lazy_dispatchers) {
1447 GenerateDispatcherCode(assembler, &call_target_function); 1473 GenerateDispatcherCode(assembler, &call_target_function);
1448 } else { 1474 } else {
1449 __ b(&call_target_function); 1475 __ b(&call_target_function);
1450 } 1476 }
1451 1477
1452 __ Bind(&found); 1478 __ Bind(&found);
1453 // R6: pointer to an IC data check group. 1479 // R6: pointer to an IC data check group.
(...skipping 17 matching lines...)
1471 __ ldr(R1, Address(SP, 0 * kWordSize)); 1497 __ ldr(R1, Address(SP, 0 * kWordSize));
1472 if (num_args == 2) { 1498 if (num_args == 2) {
1473 __ ldr(R3, Address(SP, 1 * kWordSize)); 1499 __ ldr(R3, Address(SP, 1 * kWordSize));
1474 } 1500 }
1475 __ EnterStubFrame(); 1501 __ EnterStubFrame();
1476 if (num_args == 2) { 1502 if (num_args == 2) {
1477 __ PushList((1 << R1) | (1 << R3) | (1 << R5)); 1503 __ PushList((1 << R1) | (1 << R3) | (1 << R5));
1478 } else { 1504 } else {
1479 __ PushList((1 << R1) | (1 << R5)); 1505 __ PushList((1 << R1) | (1 << R5));
1480 } 1506 }
1507 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
1481 __ blx(R2); 1508 __ blx(R2);
1482 1509
1483 Label done; 1510 Label done;
1484 __ ldr(R5, Address(FP, kFirstLocalSlotFromFp * kWordSize)); 1511 __ ldr(R5, Address(FP, kFirstLocalSlotFromFp * kWordSize));
1485 __ UpdateRangeFeedback(R0, 2, R5, R1, R4, &done); 1512 __ UpdateRangeFeedback(R0, 2, R5, R1, R4, &done);
1486 __ Bind(&done); 1513 __ Bind(&done);
1514 __ RestoreCodePointer();
1487 __ LeaveStubFrame(); 1515 __ LeaveStubFrame();
1488 __ Ret(); 1516 __ Ret();
1489 } else { 1517 } else {
1518 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
1490 __ bx(R2); 1519 __ bx(R2);
1491 } 1520 }
1492 1521
1493 if (FLAG_support_debugger && !optimized) { 1522 if (FLAG_support_debugger && !optimized) {
1494 __ Bind(&stepping); 1523 __ Bind(&stepping);
1495 __ EnterStubFrame(); 1524 __ EnterStubFrame();
1496 __ Push(R5); // Preserve IC data. 1525 __ Push(R5); // Preserve IC data.
1497 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); 1526 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
1498 __ Pop(R5); 1527 __ Pop(R5);
1528 __ RestoreCodePointer();
1499 __ LeaveStubFrame(); 1529 __ LeaveStubFrame();
1500 __ b(&done_stepping); 1530 __ b(&done_stepping);
1501 } 1531 }
1502 } 1532 }
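For orientation, the check this stub performs over the IC data array — scan the groups for a class-id match, jump to the target on a hit, fall through to the miss handler otherwise, exactly as the comment before the function says — can be modeled in plain C++ roughly as below. The flat (class ids..., target, count) group layout is an assumption for illustration; the real layout lives in ICData.

#include <cstdint>
#include <vector>

// Assumed flat layout per check group: num_args class ids, then a
// jump target, then a call count (illustrative only).
void* LookupInICData(const std::vector<intptr_t>& data, int num_args,
                     const intptr_t* receiver_cids) {
  const size_t entry_size = num_args + 2;
  for (size_t i = 0; i + entry_size <= data.size(); i += entry_size) {
    bool match = true;
    for (int a = 0; a < num_args; ++a) {
      if (data[i + a] != receiver_cids[a]) { match = false; break; }
    }
    if (match) {
      return reinterpret_cast<void*>(data[i + num_args]);  // jump target
    }
  }
  return nullptr;  // no group matched: take the IC-miss runtime path
}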
1503 1533
1504 1534
1505 // Use inline cache data array to invoke the target or continue in inline 1535 // Use inline cache data array to invoke the target or continue in inline
1506 // cache miss handler. Stub for 1-argument check (receiver class). 1536 // cache miss handler. Stub for 1-argument check (receiver class).
1507 // LR: return address. 1537 // LR: return address.
1508 // R5: inline cache data object. 1538 // R5: inline cache data object.
(...skipping 130 matching lines...)
1639 __ adds(R1, R1, Operand(Smi::RawValue(1))); 1669 __ adds(R1, R1, Operand(Smi::RawValue(1)));
1640 __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), VS); // Overflow. 1670 __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), VS); // Overflow.
1641 __ StoreIntoSmiField(Address(R6, count_offset), R1); 1671 __ StoreIntoSmiField(Address(R6, count_offset), R1);
1642 } 1672 }
1643 1673
1644 // Load arguments descriptor into R4. 1674 // Load arguments descriptor into R4.
1645 __ ldr(R4, FieldAddress(R5, ICData::arguments_descriptor_offset())); 1675 __ ldr(R4, FieldAddress(R5, ICData::arguments_descriptor_offset()));
1646 1676
1647 // Get function and call it, if possible. 1677 // Get function and call it, if possible.
1648 __ LoadFromOffset(kWord, R0, R6, target_offset); 1678 __ LoadFromOffset(kWord, R0, R6, target_offset);
1679 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
1649 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); 1680 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
1650 __ bx(R2); 1681 __ bx(R2);
1651 1682
1652 if (FLAG_support_debugger) { 1683 if (FLAG_support_debugger) {
1653 __ Bind(&stepping); 1684 __ Bind(&stepping);
1654 __ EnterStubFrame(); 1685 __ EnterStubFrame();
1655 __ Push(R5); // Preserve IC data. 1686 __ Push(R5); // Preserve IC data.
1656 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); 1687 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
1657 __ Pop(R5); 1688 __ Pop(R5);
1689 __ RestoreCodePointer();
1658 __ LeaveStubFrame(); 1690 __ LeaveStubFrame();
1659 __ b(&done_stepping); 1691 __ b(&done_stepping);
1660 } 1692 }
1661 } 1693 }
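The counter update near the top of this function (an adds followed by a VS-conditional LoadImmediate) is a saturating increment on the raw Smi bits: bump the count, and clamp to Smi::kMaxValue if the add overflows. A minimal sketch, assuming 32-bit raw Smis with a one-bit tag so that Smi::RawValue(x) == x << 1, and using a GCC/Clang builtin for the overflow check:

#include <cstdint>

// Assumed: Smi::RawValue(Smi::kMaxValue) == ((1 << 30) - 1) << 1.
constexpr int32_t kSmiMaxRaw = INT32_MAX - 1;

int32_t IncrementCallCount(int32_t raw_count) {
  int32_t sum;
  // Smi::RawValue(1) == 2 under the assumed tagging scheme.
  if (__builtin_add_overflow(raw_count, 2, &sum)) {
    return kSmiMaxRaw;  // clamp on overflow, like the VS-conditional load
  }
  return sum;
}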
1662 1694
1663 1695
1664 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) { 1696 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) {
1665 GenerateUsageCounterIncrement(assembler, R6); 1697 GenerateUsageCounterIncrement(assembler, R6);
1666 GenerateNArgsCheckInlineCacheStub( 1698 GenerateNArgsCheckInlineCacheStub(
1667 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, 1699 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
(...skipping 16 matching lines...)
1684 void StubCode::GenerateLazyCompileStub(Assembler* assembler) { 1716 void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
1685 // Preserve arg desc. and IC data object. 1717 // Preserve arg desc. and IC data object.
1686 __ EnterStubFrame(); 1718 __ EnterStubFrame();
1687 __ PushList((1 << R4) | (1 << R5)); 1719 __ PushList((1 << R4) | (1 << R5));
1688 __ Push(R0); // Pass function. 1720 __ Push(R0); // Pass function.
1689 __ CallRuntime(kCompileFunctionRuntimeEntry, 1); 1721 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
1690 __ Pop(R0); // Restore argument. 1722 __ Pop(R0); // Restore argument.
1691 __ PopList((1 << R4) | (1 << R5)); // Restore arg desc. and IC data. 1723 __ PopList((1 << R4) | (1 << R5)); // Restore arg desc. and IC data.
1692 __ LeaveStubFrame(); 1724 __ LeaveStubFrame();
1693 1725
1726 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
1694 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); 1727 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
1695 __ bx(R2); 1728 __ bx(R2);
1696 } 1729 }
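The shape of the lazy-compile stub — call the compiler, then tail-jump through the freshly installed code's entry point — can be modeled as below. Every name here (the structs, the trivial CompileFunction) is illustrative, not VM API.

#include <cstdio>

struct Code { void (*entry_point)(); };

struct Function {
  const char* name;
  Code* code;  // null until the function has been compiled
};

static void CompiledBody() { std::puts("running compiled code"); }
static Code trivial_code = { &CompiledBody };

Code* CompileFunction(Function* f) {  // stands in for the runtime entry
  return &trivial_code;
}

void InvokeLazily(Function* f) {
  if (f->code == nullptr) {
    f->code = CompileFunction(f);  // kCompileFunctionRuntimeEntry above
  }
  f->code->entry_point();          // the final ldr/bx pair above
}

int main() {
  Function f{"f", nullptr};
  InvokeLazily(&f);  // compiles on first call
  InvokeLazily(&f);  // reuses the installed code
}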
1697 1730
1698 1731
1699 // R5: Contains an ICData. 1732 // R5: Contains an ICData.
1700 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) { 1733 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
1701 __ EnterStubFrame(); 1734 __ EnterStubFrame();
1702 __ LoadImmediate(R0, reinterpret_cast<intptr_t>(Object::null())); 1735 __ LoadImmediate(R0, reinterpret_cast<intptr_t>(Object::null()));
1703 // Preserve arguments descriptor and make room for result. 1736 // Preserve arguments descriptor and make room for result.
1704 __ PushList((1 << R0) | (1 << R5)); 1737 __ PushList((1 << R0) | (1 << R5));
1705 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); 1738 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
1706 __ PopList((1 << R0) | (1 << R5)); 1739 __ PopList((1 << R0) | (1 << R5));
1707 __ LeaveStubFrame(); 1740 __ LeaveStubFrame();
1741 __ mov(CODE_REG, Operand(R0));
1742 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
1708 __ bx(R0); 1743 __ bx(R0);
1709 } 1744 }
1710 1745
1711 1746
1712 void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) { 1747 void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
1713 __ EnterStubFrame(); 1748 __ EnterStubFrame();
1714 __ LoadImmediate(R0, reinterpret_cast<intptr_t>(Object::null())); 1749 __ LoadImmediate(R0, reinterpret_cast<intptr_t>(Object::null()));
1715 // Make room for result. 1750 // Make room for result.
1716 __ PushList((1 << R0)); 1751 __ PushList((1 << R0));
1717 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); 1752 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
1718 __ PopList((1 << R0)); 1753 __ PopList((1 << CODE_REG));
1719 __ LeaveStubFrame(); 1754 __ LeaveStubFrame();
1755 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
1720 __ bx(R0); 1756 __ bx(R0);
1721 } 1757 }
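Both breakpoint stubs now end the same way: the runtime handler returns the original Code object, which must be installed in CODE_REG before jumping, because under this CL's new calling convention the callee expects its own Code object in that register. A hedged sketch of the hand-off, with a hypothetical Registers model standing in for the machine state:

#include <cstdint>

struct Code { uintptr_t entry_point; };

// Hypothetical model of the registers involved, for illustration only.
struct Registers {
  Code* code_reg;  // CODE_REG
  uintptr_t pc;
};

void TailCallThroughCode(Registers* regs, Code* target) {
  regs->code_reg = target;         // mov CODE_REG, R0
  regs->pc = target->entry_point;  // ldr R0, [CODE_REG, ...]; bx R0
}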
1722 1758
1723 1759
1724 // Called only from unoptimized code. All relevant registers have been saved. 1760 // Called only from unoptimized code. All relevant registers have been saved.
1725 void StubCode::GenerateDebugStepCheckStub( 1761 void StubCode::GenerateDebugStepCheckStub(
1726 Assembler* assembler) { 1762 Assembler* assembler) {
1727 // Check single stepping. 1763 // Check single stepping.
1728 Label stepping, done_stepping; 1764 Label stepping, done_stepping;
1729 __ LoadIsolate(R1); 1765 __ LoadIsolate(R1);
(...skipping 159 matching lines...)
1889 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) { 1925 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
1890 __ EnterStubFrame(); 1926 __ EnterStubFrame();
1891 __ Push(R4); 1927 __ Push(R4);
1892 __ LoadImmediate(IP, reinterpret_cast<intptr_t>(Object::null())); 1928 __ LoadImmediate(IP, reinterpret_cast<intptr_t>(Object::null()));
1893 __ Push(IP); // Setup space on stack for return value. 1929 __ Push(IP); // Setup space on stack for return value.
1894 __ Push(R6); 1930 __ Push(R6);
1895 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1); 1931 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
1896 __ Pop(R0); // Discard argument. 1932 __ Pop(R0); // Discard argument.
1897 __ Pop(R0); // Get Code object 1933 __ Pop(R0); // Get Code object
1898 __ Pop(R4); // Restore argument descriptor. 1934 __ Pop(R4); // Restore argument descriptor.
1935 __ LeaveStubFrame();
1936 __ mov(CODE_REG, Operand(R0));
1899 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); 1937 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
1900 __ LeaveStubFrame();
1901 __ bx(R0); 1938 __ bx(R0);
1902 __ bkpt(0); 1939 __ bkpt(0);
1903 } 1940 }
1904 1941
1905 1942
1906 // Does identical check (object references are equal or not equal) with special 1943 // Does identical check (object references are equal or not equal) with special
1907 // checks for boxed numbers. 1944 // checks for boxed numbers.
1908 // LR: return address. 1945 // LR: return address.
1909 // Return Zero condition flag set if equal. 1946 // Return Zero condition flag set if equal.
1910 // Note: A Mint cannot contain a value that would fit in Smi, a Bigint 1947 // Note: A Mint cannot contain a value that would fit in Smi, a Bigint
(...skipping 81 matching lines...)
1992 const Register right = R0; 2029 const Register right = R0;
1993 __ ldr(left, Address(SP, 1 * kWordSize)); 2030 __ ldr(left, Address(SP, 1 * kWordSize));
1994 __ ldr(right, Address(SP, 0 * kWordSize)); 2031 __ ldr(right, Address(SP, 0 * kWordSize));
1995 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp); 2032 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
1996 __ Ret(); 2033 __ Ret();
1997 2034
1998 if (FLAG_support_debugger) { 2035 if (FLAG_support_debugger) {
1999 __ Bind(&stepping); 2036 __ Bind(&stepping);
2000 __ EnterStubFrame(); 2037 __ EnterStubFrame();
2001 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); 2038 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2039 __ RestoreCodePointer();
2002 __ LeaveStubFrame(); 2040 __ LeaveStubFrame();
2003 __ b(&done_stepping); 2041 __ b(&done_stepping);
2004 } 2042 }
2005 } 2043 }
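The semantics this stub implements can be stated compactly: two boxed numbers are identical only if they are instances of the same class and their payloads match bit-for-bit, and, as the comment before the skipped hunk notes, the number classes cover disjoint value ranges, so cross-class matches are impossible. A sketch with an illustrative Obj model (not the VM's tagged representation):

#include <cstdint>

enum class Cid { kSmi, kMint, kDouble, kOther };

struct Obj {
  Cid cid;
  int64_t bits;  // integer value, or raw IEEE-754 bits for kDouble
};

bool IdenticalWithNumberCheck(const Obj* a, const Obj* b) {
  if (a == b) return true;             // same reference
  if (a->cid != b->cid) return false;  // disjoint ranges: no cross-class match
  switch (a->cid) {
    case Cid::kMint:
    case Cid::kDouble:
      return a->bits == b->bits;       // bitwise payload comparison
    default:
      return false;                    // others: reference identity only
  }
}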
2006 2044
2007 2045
2008 // Called from optimized code only. 2046 // Called from optimized code only.
2009 // LR: return address. 2047 // LR: return address.
2010 // SP + 4: left operand. 2048 // SP + 4: left operand.
2011 // SP + 0: right operand. 2049 // SP + 0: right operand.
(...skipping 39 matching lines...)
2051 __ cmp(R4, Operand(R0)); 2089 __ cmp(R4, Operand(R0));
2052 __ b(&update, NE); 2090 __ b(&update, NE);
2053 2091
2054 __ Bind(&call_target_function); 2092 __ Bind(&call_target_function);
2055 // Call the target found in the cache. For a class id match, this is a 2093 // Call the target found in the cache. For a class id match, this is a
2056 // proper target for the given name and arguments descriptor. If the 2094 // proper target for the given name and arguments descriptor. If the
2057 // illegal class id was found, the target is a cache miss handler that can 2095 // illegal class id was found, the target is a cache miss handler that can
2058 // be invoked as a normal Dart function. 2096 // be invoked as a normal Dart function.
2059 __ add(IP, R2, Operand(R3, LSL, 2)); 2097 __ add(IP, R2, Operand(R3, LSL, 2));
2060 __ ldr(R0, FieldAddress(IP, base + kWordSize)); 2098 __ ldr(R0, FieldAddress(IP, base + kWordSize));
2099 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
2061 __ ldr(target, FieldAddress(R0, Function::entry_point_offset())); 2100 __ ldr(target, FieldAddress(R0, Function::entry_point_offset()));
2062 } 2101 }
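The probe EmitMegamorphicLookup generates can be modeled as a linear-probed, power-of-two hash table of (class id, target) pairs, where the miss handler is stored under the illegal class id so that a miss still yields a callable target, exactly as the comment above describes. A minimal sketch; the table layout, sentinel value, and hash are assumptions:

#include <cstdint>

constexpr intptr_t kIllegalCid = -1;  // assumed sentinel value

struct Entry { intptr_t cid; void* target; };

// mask == table_size - 1, with table_size a power of two (assumed).
void* MegamorphicLookup(const Entry* table, intptr_t mask, intptr_t cid) {
  intptr_t i = cid & mask;  // assumed hash: low bits of the class id
  for (;;) {
    if (table[i].cid == cid || table[i].cid == kIllegalCid) {
      return table[i].target;  // hit, or the shared miss handler
    }
    i = (i + 1) & mask;        // linear probe to the next slot
  }
}

Storing the miss handler under the sentinel cid lets the generated code jump unconditionally through whatever target the probe returns, with no separate hit/miss branch.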
2063 2102
2064 2103
2065 // Called from megamorphic calls. 2104 // Called from megamorphic calls.
2066 // R0: receiver. 2105 // R0: receiver.
2067 // R1: lookup cache. 2106 // R1: lookup cache.
2068 // Result: 2107 // Result:
2069 // R1: entry point. 2108 // R1: entry point.
2070 void StubCode::GenerateMegamorphicLookupStub(Assembler* assembler) { 2109 void StubCode::GenerateMegamorphicLookupStub(Assembler* assembler) {
2071 EmitMegamorphicLookup(assembler, R0, R1, R1); 2110 EmitMegamorphicLookup(assembler, R0, R1, R1);
2072 __ Ret(); 2111 __ Ret();
2073 } 2112 }
2074 2113
2075 } // namespace dart 2114 } // namespace dart
2076 2115
2077 #endif // defined TARGET_ARCH_ARM 2116 #endif // defined TARGET_ARCH_ARM