Chromium Code Reviews

Side by Side Diff: runtime/vm/stub_code_arm.cc

Issue 1192103004: VM: New calling convention for generated code. (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: preserve CODE_REG in ARM Integer_shl intrinsic. Created 5 years, 3 months ago
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" 5 #include "vm/globals.h"
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/assembler.h" 8 #include "vm/assembler.h"
9 #include "vm/code_generator.h" 9 #include "vm/code_generator.h"
10 #include "vm/cpu.h" 10 #include "vm/cpu.h"
(...skipping 24 matching lines...)
35 // SP : address of last argument in argument array. 35 // SP : address of last argument in argument array.
36 // SP + 4*R4 - 4 : address of first argument in argument array. 36 // SP + 4*R4 - 4 : address of first argument in argument array.
37 // SP + 4*R4 : address of return value. 37 // SP + 4*R4 : address of return value.
38 // R5 : address of the runtime function to call. 38 // R5 : address of the runtime function to call.
39 // R4 : number of arguments to the call. 39 // R4 : number of arguments to the call.
40 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { 40 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
41 const intptr_t thread_offset = NativeArguments::thread_offset(); 41 const intptr_t thread_offset = NativeArguments::thread_offset();
42 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); 42 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
43 const intptr_t argv_offset = NativeArguments::argv_offset(); 43 const intptr_t argv_offset = NativeArguments::argv_offset();
44 const intptr_t retval_offset = NativeArguments::retval_offset(); 44 const intptr_t retval_offset = NativeArguments::retval_offset();
45 const intptr_t exitframe_last_param_slot_from_fp = 2;
46 45
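For orientation: the four offsets read above describe a structure of exactly four word-sized fields, which is why the stub can pass it by value in R0-R3 further down. A minimal sketch of the assumed layout (the name and field types are illustrative, not the VM's exact declaration):

    // Sketch only: the layout implied by the thread/argc_tag/argv/retval
    // offsets and the sizeof ASSERT below (VM types assumed in scope).
    struct NativeArgumentsSketch {
      Thread* thread;      // offset 0 * kWordSize -> passed in R0
      intptr_t argc_tag;   // offset 1 * kWordSize -> passed in R1
      RawObject** argv;    // offset 2 * kWordSize -> passed in R2
      RawObject** retval;  // offset 3 * kWordSize -> passed in R3
    };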
47 __ EnterStubFrame(); 46 __ EnterStubFrame();
48 47
49 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R9)) != 0); 48 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R7)) != 0);
50 __ LoadIsolate(R9); 49 __ LoadIsolate(R7);
51 50
52 // Save exit frame information to enable stack walking as we are about 51 // Save exit frame information to enable stack walking as we are about
53 // to transition to Dart VM C++ code. 52 // to transition to Dart VM C++ code.
54 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); 53 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
55 54
56 #if defined(DEBUG) 55 #if defined(DEBUG)
57 { Label ok; 56 { Label ok;
58 // Check that we are always entering from Dart code. 57 // Check that we are always entering from Dart code.
59 __ LoadFromOffset(kWord, R6, R9, Isolate::vm_tag_offset()); 58 __ LoadFromOffset(kWord, R6, R7, Isolate::vm_tag_offset());
60 __ CompareImmediate(R6, VMTag::kDartTagId); 59 __ CompareImmediate(R6, VMTag::kDartTagId);
61 __ b(&ok, EQ); 60 __ b(&ok, EQ);
62 __ Stop("Not coming from Dart code."); 61 __ Stop("Not coming from Dart code.");
63 __ Bind(&ok); 62 __ Bind(&ok);
64 } 63 }
65 #endif 64 #endif
66 65
67 // Mark that the isolate is executing VM code. 66 // Mark that the isolate is executing VM code.
68 __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset()); 67 __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset());
69 68
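The vm_tag stores bracketing this call follow a simple protocol: on the way out of Dart the tag is set to the address of the C++ function about to run (R5 here), so profilers can attribute the time, and on the way back it is reset to VMTag::kDartTagId. A hedged RAII paraphrase, assuming a hypothetical set_vm_tag accessor:

    // Hypothetical C++ equivalent of the two tag stores around the call:
    struct VMTagScopeSketch {
      Isolate* isolate;
      VMTagScopeSketch(Isolate* isolate, uword entry) : isolate(isolate) {
        isolate->set_vm_tag(entry);  // mark: executing VM (or native) code
      }
      ~VMTagScopeSketch() {
        isolate->set_vm_tag(VMTag::kDartTagId);  // mark: back in Dart code
      }
    };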
70 // Reserve space for arguments and align frame before entering C++ world. 69 // Reserve space for arguments and align frame before entering C++ world.
71 // NativeArguments are passed in registers. 70 // NativeArguments are passed in registers.
72 ASSERT(sizeof(NativeArguments) == 4 * kWordSize); 71 ASSERT(sizeof(NativeArguments) == 4 * kWordSize);
73 __ ReserveAlignedFrameSpace(0); 72 __ ReserveAlignedFrameSpace(0);
74 73
75 // Pass NativeArguments structure by value and call runtime. 74 // Pass NativeArguments structure by value and call runtime.
76 // Registers R0, R1, R2, and R3 are used. 75 // Registers R0, R1, R2, and R3 are used.
77 76
78 ASSERT(thread_offset == 0 * kWordSize); 77 ASSERT(thread_offset == 0 * kWordSize);
79 // Set thread in NativeArgs. 78 // Set thread in NativeArgs.
80 __ mov(R0, Operand(THR)); 79 __ mov(R0, Operand(THR));
81 80
82 // There are no runtime calls to closures, so we do not need to set the tag 81 // There are no runtime calls to closures, so we do not need to set the tag
83 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. 82 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
84 ASSERT(argc_tag_offset == 1 * kWordSize); 83 ASSERT(argc_tag_offset == 1 * kWordSize);
85 __ mov(R1, Operand(R4)); // Set argc in NativeArguments. 84 __ mov(R1, Operand(R4)); // Set argc in NativeArguments.
86 85
87 ASSERT(argv_offset == 2 * kWordSize); 86 ASSERT(argv_offset == 2 * kWordSize);
88 __ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv. 87 __ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv.
89 // Set argv in NativeArguments. 88 // Set argv in NativeArguments.
90 __ AddImmediate(R2, exitframe_last_param_slot_from_fp * kWordSize); 89 __ AddImmediate(R2, kParamEndSlotFromFp * kWordSize);
91 90
92 ASSERT(retval_offset == 3 * kWordSize); 91 ASSERT(retval_offset == 3 * kWordSize);
93 __ add(R3, R2, Operand(kWordSize)); // Retval is next to 1st argument. 92 __ add(R3, R2, Operand(kWordSize)); // Retval is next to 1st argument.
94 93
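To make the address arithmetic concrete, a worked example with hypothetical numbers (argc in R4 == 2, 32-bit words, so kWordSize == 4):

    // R2 = FP + (2 << 2)              = FP + 8
    // R2 += kParamEndSlotFromFp * 4   // R2 now addresses the first argument
    // R3 = R2 + 4                     // return-value slot just past arg[0]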
95 // Call runtime or redirection via simulator. 94 // Call runtime or redirection via simulator.
96 __ blx(R5); 95 __ blx(R5);
97 96
98 // Mark that the isolate is executing Dart code. 97 // Mark that the isolate is executing Dart code.
99 __ LoadImmediate(R2, VMTag::kDartTagId); 98 __ LoadImmediate(R2, VMTag::kDartTagId);
100 __ StoreToOffset(kWord, R2, R9, Isolate::vm_tag_offset()); 99 __ StoreToOffset(kWord, R2, R7, Isolate::vm_tag_offset());
101 100
102 // Reset exit frame information in Isolate structure. 101 // Reset exit frame information in Isolate structure.
103 __ LoadImmediate(R2, 0); 102 __ LoadImmediate(R2, 0);
104 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); 103 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
105 104
106 __ LeaveStubFrame(); 105 __ LeaveStubFrame();
107 __ Ret(); 106 __ Ret();
108 } 107 }
109 108
110 109
(...skipping 23 matching lines...)
134 // R2 : address of first argument in argument array. 133 // R2 : address of first argument in argument array.
135 // R1 : argc_tag including number of arguments and function kind. 134 // R1 : argc_tag including number of arguments and function kind.
136 void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) { 135 void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
137 const intptr_t thread_offset = NativeArguments::thread_offset(); 136 const intptr_t thread_offset = NativeArguments::thread_offset();
138 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); 137 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
139 const intptr_t argv_offset = NativeArguments::argv_offset(); 138 const intptr_t argv_offset = NativeArguments::argv_offset();
140 const intptr_t retval_offset = NativeArguments::retval_offset(); 139 const intptr_t retval_offset = NativeArguments::retval_offset();
141 140
142 __ EnterStubFrame(); 141 __ EnterStubFrame();
143 142
144 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R9)) != 0); 143 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R7)) != 0);
145 __ LoadIsolate(R9); 144 __ LoadIsolate(R7);
146 145
147 // Save exit frame information to enable stack walking as we are about 146 // Save exit frame information to enable stack walking as we are about
148 // to transition to native code. 147 // to transition to native code.
149 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); 148 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
150 149
151 #if defined(DEBUG) 150 #if defined(DEBUG)
152 { Label ok; 151 { Label ok;
153 // Check that we are always entering from Dart code. 152 // Check that we are always entering from Dart code.
154 __ LoadFromOffset(kWord, R6, R9, Isolate::vm_tag_offset()); 153 __ LoadFromOffset(kWord, R6, R7, Isolate::vm_tag_offset());
155 __ CompareImmediate(R6, VMTag::kDartTagId); 154 __ CompareImmediate(R6, VMTag::kDartTagId);
156 __ b(&ok, EQ); 155 __ b(&ok, EQ);
157 __ Stop("Not coming from Dart code."); 156 __ Stop("Not coming from Dart code.");
158 __ Bind(&ok); 157 __ Bind(&ok);
159 } 158 }
160 #endif 159 #endif
161 160
162 // Mark that the isolate is executing Native code. 161 // Mark that the isolate is executing Native code.
163 __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset()); 162 __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset());
164 163
165 // Reserve space for the native arguments structure passed on the stack (the 164 // Reserve space for the native arguments structure passed on the stack (the
166 // outgoing pointer parameter to the native arguments structure is passed in 165 // outgoing pointer parameter to the native arguments structure is passed in
167 // R0) and align frame before entering the C++ world. 166 // R0) and align frame before entering the C++ world.
168 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); 167 __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
169 168
170 // Initialize NativeArguments structure and call native function. 169 // Initialize NativeArguments structure and call native function.
171 // Registers R0, R1, R2, and R3 are used. 170 // Registers R0, R1, R2, and R3 are used.
172 171
173 ASSERT(thread_offset == 0 * kWordSize); 172 ASSERT(thread_offset == 0 * kWordSize);
174 // Set thread in NativeArgs. 173 // Set thread in NativeArgs.
175 __ mov(R0, Operand(THR)); 174 __ mov(R0, Operand(THR));
176 175
177 // There are no native calls to closures, so we do not need to set the tag 176 // There are no native calls to closures, so we do not need to set the tag
178 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. 177 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
179 ASSERT(argc_tag_offset == 1 * kWordSize); 178 ASSERT(argc_tag_offset == 1 * kWordSize);
180 // Set argc in NativeArguments: R1 already contains argc. 179 // Set argc in NativeArguments: R1 already contains argc.
181 180
182 ASSERT(argv_offset == 2 * kWordSize); 181 ASSERT(argv_offset == 2 * kWordSize);
183 // Set argv in NativeArguments: R2 already contains argv. 182 // Set argv in NativeArguments: R2 already contains argv.
184 183
185 ASSERT(retval_offset == 3 * kWordSize); 184 ASSERT(retval_offset == 3 * kWordSize);
186 __ add(R3, FP, Operand(3 * kWordSize)); // Set retval in NativeArgs. 185 // Set retval in NativeArgs.
186 __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize));
187 187
188 // Passing the structure by value as in runtime calls would require changing 188 // Passing the structure by value as in runtime calls would require changing
189 // Dart API for native functions. 189 // Dart API for native functions.
190 // For now, space is reserved on the stack and we pass a pointer to it. 190 // For now, space is reserved on the stack and we pass a pointer to it.
191 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3)); 191 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
192 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments. 192 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments.
193 193
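In C++ terms, the stm plus mov pair materializes the structure in the just-reserved stack slots and passes its address, since the Dart API wants NativeArguments by pointer rather than by value. An illustrative paraphrase, reusing the NativeArgumentsSketch from the note above:

    // STM IA stores R0..R3 to [SP], [SP+4], [SP+8], [SP+12]:
    NativeArgumentsSketch* args =
        reinterpret_cast<NativeArgumentsSketch*>(sp);
    args->thread = thread;      // from R0
    args->argc_tag = argc_tag;  // from R1
    args->argv = argv;          // from R2
    args->retval = retval;      // from R3
    // mov R0, SP then passes the pointer itself, unlike runtime calls,
    // which hand the same four words over by value in R0-R3.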
194 __ mov(R1, Operand(R5)); // Pass the function entrypoint to call. 194 __ mov(R1, Operand(R5)); // Pass the function entrypoint to call.
195 // Call native function invocation wrapper or redirection via simulator. 195 // Call native function invocation wrapper or redirection via simulator.
196 #if defined(USING_SIMULATOR) 196 #if defined(USING_SIMULATOR)
197 uword entry = reinterpret_cast<uword>(NativeEntry::NativeCallWrapper); 197 uword entry = reinterpret_cast<uword>(NativeEntry::NativeCallWrapper);
198 entry = Simulator::RedirectExternalReference( 198 const ExternalLabel label(Simulator::RedirectExternalReference(
199 entry, Simulator::kNativeCall, NativeEntry::kNumCallWrapperArguments); 199 entry, Simulator::kNativeCall, NativeEntry::kNumCallWrapperArguments));
200 __ LoadImmediate(R2, entry); 200 __ LoadExternalLabel(R2, &label, kNotPatchable);
201 __ blx(R2); 201 __ blx(R2);
202 #else 202 #else
203 __ BranchLink(&NativeEntry::NativeCallWrapperLabel(), kNotPatchable); 203 __ LoadExternalLabel(
204 LR, &NativeEntry::NativeCallWrapperLabel(), kNotPatchable);
205 __ blx(LR);
204 #endif 206 #endif
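A note on the non-simulator path: blx LR works as a call here because the branch target is read out of LR before the return address is written back into it, so a single register carries both the callee address and the way back:

    // blx LR: target <- LR is latched first, then LR <- return address,
    // sparing a scratch register at this point.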
205 207
206 // Mark that the isolate is executing Dart code. 208 // Mark that the isolate is executing Dart code.
207 __ LoadImmediate(R2, VMTag::kDartTagId); 209 __ LoadImmediate(R2, VMTag::kDartTagId);
208 __ StoreToOffset(kWord, R2, R9, Isolate::vm_tag_offset()); 210 __ StoreToOffset(kWord, R2, R7, Isolate::vm_tag_offset());
209 211
210 // Reset exit frame information in Isolate structure. 212 // Reset exit frame information in Isolate structure.
211 __ LoadImmediate(R2, 0); 213 __ LoadImmediate(R2, 0);
212 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); 214 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
213 215
214 __ LeaveStubFrame(); 216 __ LeaveStubFrame();
215 __ Ret(); 217 __ Ret();
216 } 218 }
217 219
218 220
219 // Input parameters: 221 // Input parameters:
220 // LR : return address. 222 // LR : return address.
221 // SP : address of return value. 223 // SP : address of return value.
222 // R5 : address of the native function to call. 224 // R5 : address of the native function to call.
223 // R2 : address of first argument in argument array. 225 // R2 : address of first argument in argument array.
224 // R1 : argc_tag including number of arguments and function kind. 226 // R1 : argc_tag including number of arguments and function kind.
225 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) { 227 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
226 const intptr_t thread_offset = NativeArguments::thread_offset(); 228 const intptr_t thread_offset = NativeArguments::thread_offset();
227 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); 229 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
228 const intptr_t argv_offset = NativeArguments::argv_offset(); 230 const intptr_t argv_offset = NativeArguments::argv_offset();
229 const intptr_t retval_offset = NativeArguments::retval_offset(); 231 const intptr_t retval_offset = NativeArguments::retval_offset();
230 232
231 __ EnterStubFrame(); 233 __ EnterStubFrame();
232 234
233 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R9)) != 0); 235 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R7)) != 0);
234 __ LoadIsolate(R9); 236 __ LoadIsolate(R7);
235 237
236 // Save exit frame information to enable stack walking as we are about 238 // Save exit frame information to enable stack walking as we are about
237 // to transition to native code. 239 // to transition to native code.
238 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); 240 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
239 241
240 #if defined(DEBUG) 242 #if defined(DEBUG)
241 { Label ok; 243 { Label ok;
242 // Check that we are always entering from Dart code. 244 // Check that we are always entering from Dart code.
243 __ LoadFromOffset(kWord, R6, R9, Isolate::vm_tag_offset()); 245 __ LoadFromOffset(kWord, R6, R7, Isolate::vm_tag_offset());
244 __ CompareImmediate(R6, VMTag::kDartTagId); 246 __ CompareImmediate(R6, VMTag::kDartTagId);
245 __ b(&ok, EQ); 247 __ b(&ok, EQ);
246 __ Stop("Not coming from Dart code."); 248 __ Stop("Not coming from Dart code.");
247 __ Bind(&ok); 249 __ Bind(&ok);
248 } 250 }
249 #endif 251 #endif
250 252
251 // Mark that the isolate is executing Native code. 253 // Mark that the isolate is executing Native code.
252 __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset()); 254 __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset());
253 255
254 // Reserve space for the native arguments structure passed on the stack (the 256 // Reserve space for the native arguments structure passed on the stack (the
255 // outgoing pointer parameter to the native arguments structure is passed in 257 // outgoing pointer parameter to the native arguments structure is passed in
256 // R0) and align frame before entering the C++ world. 258 // R0) and align frame before entering the C++ world.
257 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); 259 __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
258 260
259 // Initialize NativeArguments structure and call native function. 261 // Initialize NativeArguments structure and call native function.
260 // Registers R0, R1, R2, and R3 are used. 262 // Registers R0, R1, R2, and R3 are used.
261 263
262 ASSERT(thread_offset == 0 * kWordSize); 264 ASSERT(thread_offset == 0 * kWordSize);
263 // Set thread in NativeArgs. 265 // Set thread in NativeArgs.
264 __ mov(R0, Operand(THR)); 266 __ mov(R0, Operand(THR));
265 267
266 // There are no native calls to closures, so we do not need to set the tag 268 // There are no native calls to closures, so we do not need to set the tag
267 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. 269 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
268 ASSERT(argc_tag_offset == 1 * kWordSize); 270 ASSERT(argc_tag_offset == 1 * kWordSize);
269 // Set argc in NativeArguments: R1 already contains argc. 271 // Set argc in NativeArguments: R1 already contains argc.
270 272
271 ASSERT(argv_offset == 2 * kWordSize); 273 ASSERT(argv_offset == 2 * kWordSize);
272 // Set argv in NativeArguments: R2 already contains argv. 274 // Set argv in NativeArguments: R2 already contains argv.
273 275
274 ASSERT(retval_offset == 3 * kWordSize); 276 ASSERT(retval_offset == 3 * kWordSize);
275 __ add(R3, FP, Operand(3 * kWordSize)); // Set retval in NativeArgs. 277 // Set retval in NativeArgs.
278 __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize));
276 279
277 // Passing the structure by value as in runtime calls would require changing 280 // Passing the structure by value as in runtime calls would require changing
278 // Dart API for native functions. 281 // Dart API for native functions.
279 // For now, space is reserved on the stack and we pass a pointer to it. 282 // For now, space is reserved on the stack and we pass a pointer to it.
280 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3)); 283 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
281 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments. 284 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments.
282 285
283 // Call native function or redirection via simulator. 286 // Call native function or redirection via simulator.
284 __ blx(R5); 287 __ blx(R5);
285 288
286 // Mark that the isolate is executing Dart code. 289 // Mark that the isolate is executing Dart code.
287 __ LoadImmediate(R2, VMTag::kDartTagId); 290 __ LoadImmediate(R2, VMTag::kDartTagId);
288 __ StoreToOffset(kWord, R2, R9, Isolate::vm_tag_offset()); 291 __ StoreToOffset(kWord, R2, R7, Isolate::vm_tag_offset());
289 292
290 // Reset exit frame information in Isolate structure. 293 // Reset exit frame information in Isolate structure.
291 __ LoadImmediate(R2, 0); 294 __ LoadImmediate(R2, 0);
292 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); 295 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
293 296
294 __ LeaveStubFrame(); 297 __ LeaveStubFrame();
295 __ Ret(); 298 __ Ret();
296 } 299 }
297 300
298 301
299 // Input parameters: 302 // Input parameters:
300 // R4: arguments descriptor array. 303 // R4: arguments descriptor array.
301 void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) { 304 void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
302 // Create a stub frame as we are pushing some objects on the stack before 305 // Create a stub frame as we are pushing some objects on the stack before
303 // calling into the runtime. 306 // calling into the runtime.
304 __ EnterStubFrame(); 307 __ EnterStubFrame();
305 // Setup space on stack for return value and preserve arguments descriptor. 308 // Setup space on stack for return value and preserve arguments descriptor.
306 __ LoadObject(R0, Object::null_object()); 309 __ LoadObject(R0, Object::null_object());
307 __ PushList((1 << R0) | (1 << R4)); 310 __ PushList((1 << R0) | (1 << R4));
308 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0); 311 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
309 // Get Code object result and restore arguments descriptor array. 312 // Get Code object result and restore arguments descriptor array.
310 __ PopList((1 << R0) | (1 << R4)); 313 __ PopList((1 << R0) | (1 << R4));
311 // Remove the stub frame. 314 // Remove the stub frame.
312 __ LeaveStubFrame(); 315 __ LeaveStubFrame();
313 // Jump to the dart function. 316 // Jump to the dart function.
317 __ mov(CODE_REG, Operand(R0));
314 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); 318 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
315 __ bx(R0); 319 __ bx(R0);
316 } 320 }
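The mov into CODE_REG just before the jump is the pattern this CL threads through every transfer into generated code; stated as a hedged invariant rather than anything the source spells out:

    // Invariant (sketch): on entry to any generated Dart code,
    //   CODE_REG == the Code object whose entry_point is the target PC,
    // letting the callee derive PP from CODE_REG and letting stack walkers
    // recover the code object without a PC-range lookup.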
317 321
318 322
319 // Called from a static call only when an invalid code has been entered 323 // Called from a static call only when an invalid code has been entered
320 // (invalid because its function was optimized or deoptimized). 324 // (invalid because its function was optimized or deoptimized).
321 // R4: arguments descriptor array. 325 // R4: arguments descriptor array.
322 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) { 326 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
327 // Load the code pointer of this stub from the thread. The one that was
328 // passed in is not correct - it points to the code object that needs
329 // to be replaced.
330 __ ldr(CODE_REG, Address(THR, Thread::fix_callers_target_code_offset()));
323 // Create a stub frame as we are pushing some objects on the stack before 331 // Create a stub frame as we are pushing some objects on the stack before
324 // calling into the runtime. 332 // calling into the runtime.
325 __ EnterStubFrame(); 333 __ EnterStubFrame();
326 // Setup space on stack for return value and preserve arguments descriptor. 334 // Setup space on stack for return value and preserve arguments descriptor.
327 __ LoadObject(R0, Object::null_object()); 335 __ LoadObject(R0, Object::null_object());
328 __ PushList((1 << R0) | (1 << R4)); 336 __ PushList((1 << R0) | (1 << R4));
329 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0); 337 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
330 // Get Code object result and restore arguments descriptor array. 338 // Get Code object result and restore arguments descriptor array.
331 __ PopList((1 << R0) | (1 << R4)); 339 __ PopList((1 << R0) | (1 << R4));
332 // Remove the stub frame. 340 // Remove the stub frame.
333 __ LeaveStubFrame(); 341 __ LeaveStubFrame();
334 // Jump to the dart function. 342 // Jump to the dart function.
343 __ mov(CODE_REG, Operand(R0));
335 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); 344 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
336 __ bx(R0); 345 __ bx(R0);
337 } 346 }
338 347
339 348
340 // Called from object allocate instruction when the allocation stub has been 349 // Called from object allocate instruction when the allocation stub has been
341 // disabled. 350 // disabled.
342 void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) { 351 void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
352 // Load the code pointer of this stub from the thread. The one that was
353 // passed in is not correct - it points to the code object that needs
354 // to be replaced.
355 __ ldr(CODE_REG, Address(THR, Thread::fix_allocation_stub_code_offset()));
343 __ EnterStubFrame(); 356 __ EnterStubFrame();
344 // Setup space on stack for return value. 357 // Setup space on stack for return value.
345 __ LoadObject(R0, Object::null_object()); 358 __ LoadObject(R0, Object::null_object());
346 __ Push(R0); 359 __ Push(R0);
347 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0); 360 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
348 // Get Code object result. 361 // Get Code object result.
349 __ Pop(R0); 362 __ Pop(R0);
350 // Remove the stub frame. 363 // Remove the stub frame.
351 __ LeaveStubFrame(); 364 __ LeaveStubFrame();
352 // Jump to the dart function. 365 // Jump to the dart function.
366 __ mov(CODE_REG, Operand(R0));
353 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); 367 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
354 __ bx(R0); 368 __ bx(R0);
355 } 369 }
356 370
357 371
358 // Input parameters: 372 // Input parameters:
359 // R2: smi-tagged argument count, may be zero. 373 // R2: smi-tagged argument count, may be zero.
360 // FP[kParamEndSlotFromFp + 1]: last argument. 374 // FP[kParamEndSlotFromFp + 1]: last argument.
361 static void PushArgumentsArray(Assembler* assembler) { 375 static void PushArgumentsArray(Assembler* assembler) {
362 // Allocate array to store arguments of caller. 376 // Allocate array to store arguments of caller.
(...skipping 33 matching lines...)
396 // - Materialize objects that require allocation (e.g. Double instances). 410 // - Materialize objects that require allocation (e.g. Double instances).
397 // GC can occur only after frame is fully rewritten. 411 // GC can occur only after frame is fully rewritten.
398 // Stack after EnterFrame(...) below: 412 // Stack after EnterFrame(...) below:
399 // +------------------+ 413 // +------------------+
400 // | Saved PP | <- TOS 414 // | Saved PP | <- TOS
401 // +------------------+ 415 // +------------------+
402 // | Saved FP | <- FP of stub 416 // | Saved FP | <- FP of stub
403 // +------------------+ 417 // +------------------+
404 // | Saved LR | (deoptimization point) 418 // | Saved LR | (deoptimization point)
405 // +------------------+ 419 // +------------------+
406 // | PC marker | 420 // | pc marker |
421 // +------------------+
422 // | Saved CODE_REG |
407 // +------------------+ 423 // +------------------+
408 // | ... | <- SP of optimized frame 424 // | ... | <- SP of optimized frame
409 // 425 //
410 // Parts of the code cannot GC, part of the code can GC. 426 // Parts of the code cannot GC, part of the code can GC.
411 static void GenerateDeoptimizationSequence(Assembler* assembler, 427 static void GenerateDeoptimizationSequence(Assembler* assembler,
412 bool preserve_result) { 428 DeoptStubKind kind) {
413 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there 429 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
414 // is no need to set the correct PC marker or load PP, since they get patched. 430 // is no need to set the correct PC marker or load PP, since they get patched.
415 431
416 // IP has the potentially live LR value. LR was clobbered by the call with 432 // IP has the potentially live LR value. LR was clobbered by the call with
417 // the return address, so move it into IP to set up the Dart frame. 433 // the return address, so move it into IP to set up the Dart frame.
418 __ eor(IP, IP, Operand(LR)); 434 __ eor(IP, IP, Operand(LR));
419 __ eor(LR, IP, Operand(LR)); 435 __ eor(LR, IP, Operand(LR));
420 __ eor(IP, IP, Operand(LR)); 436 __ eor(IP, IP, Operand(LR));
421 437
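The three eor instructions are the classic XOR swap: IP and LR are exchanged without a scratch register, since nothing else may be clobbered yet. For reference:

    #include <cstdint>

    // XOR swap: exchange a and b using no third register.
    inline void XorSwap(uint32_t& a, uint32_t& b) {
      a ^= b;  // a = a0 ^ b0
      b ^= a;  // b = (a0 ^ b0) ^ b0 = a0
      a ^= b;  // a = (a0 ^ b0) ^ a0 = b0
    }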
422 // Set up the frame manually. We can't use EnterFrame because we can't 438 // Set up the frame manually with return address now stored in IP.
423 // clobber LR (or any other register) with 0, yet. 439 __ EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << IP), 0);
424 __ sub(SP, SP, Operand(kWordSize)); // Make room for PC marker of 0.
425 __ Push(IP); // Push return address.
426 __ Push(FP);
427 __ mov(FP, Operand(SP));
428 __ Push(PP);
429
430 __ LoadPoolPointer(); 440 __ LoadPoolPointer();
431 441
432 // Now that IP holding the return address has been written to the stack,
433 // we can clobber it with 0 to write the null PC marker.
434 __ mov(IP, Operand(0));
435 __ str(IP, Address(SP, +3 * kWordSize));
436
437 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry 442 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
438 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls. 443 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
439 const intptr_t saved_result_slot_from_fp = 444 const intptr_t saved_result_slot_from_fp =
440 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - R0); 445 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - R0);
441 // Result in R0 is preserved as part of pushing all registers below. 446 // Result in R0 is preserved as part of pushing all registers below.
442 447
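The slot arithmetic, with concrete numbers for illustration: the loop below pushes every CPU register with the lowest register number at the lowest address, so R0 ends up deepest in the block. With ARM's 16 CPU registers:

    // saved_result_slot_from_fp = kFirstLocalSlotFromFp + 1 - (16 - 0)
    //                           = kFirstLocalSlotFromFp - 15
    // i.e. R15 occupies the first local slot and R0 sits 15 words lower.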
443 // Push registers in their enumeration order: lowest register number at 448 // Push registers in their enumeration order: lowest register number at
444 // lowest address. 449 // lowest address.
445 __ PushList(kAllCpuRegistersList); 450 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
451 if (i == CODE_REG) {
452 COMPILE_ASSERT(IP > CODE_REG); // Assert IP is pushed first.
453 __ ldr(IP, Address(FP, kCallerSpSlotFromFp * kWordSize));
rmacnak 2015/09/15 23:05:42 Add a comment that this special handling of CODE_R
Florian Schneider 2015/09/16 10:36:45 Done.
454 __ Push(IP);
455 } else {
456 __ Push(static_cast<Register>(i));
457 }
458 }
446 459
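The review exchange above asks for a comment on the CODE_REG case; a hedged draft of what it needs to say:

    // Why CODE_REG is special-cased: at this point CODE_REG holds the
    // deoptimization stub's own Code object, which is not the value the
    // register snapshot should record. The word at kCallerSpSlotFromFp is
    // what the caller left on top of its stack for this purpose (in the
    // lazy case, the 0xf1f1f1f1 zap value pushed by
    // GenerateDeoptimizeLazyStub below); it is reloaded via IP and pushed
    // in CODE_REG's place.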
447 if (TargetCPUFeatures::vfp_supported()) { 460 if (TargetCPUFeatures::vfp_supported()) {
448 ASSERT(kFpuRegisterSize == 4 * kWordSize); 461 ASSERT(kFpuRegisterSize == 4 * kWordSize);
449 if (kNumberOfDRegisters > 16) { 462 if (kNumberOfDRegisters > 16) {
450 __ vstmd(DB_W, SP, D16, kNumberOfDRegisters - 16); 463 __ vstmd(DB_W, SP, D16, kNumberOfDRegisters - 16);
451 __ vstmd(DB_W, SP, D0, 16); 464 __ vstmd(DB_W, SP, D0, 16);
452 } else { 465 } else {
453 __ vstmd(DB_W, SP, D0, kNumberOfDRegisters); 466 __ vstmd(DB_W, SP, D0, kNumberOfDRegisters);
454 } 467 }
455 } else { 468 } else {
456 __ AddImmediate(SP, SP, -kNumberOfFpuRegisters * kFpuRegisterSize); 469 __ AddImmediate(SP, SP, -kNumberOfFpuRegisters * kFpuRegisterSize);
457 } 470 }
458 471
459 __ mov(R0, Operand(SP)); // Pass address of saved registers block. 472 __ mov(R0, Operand(SP)); // Pass address of saved registers block.
473 __ mov(R1, Operand(kind == kLazyDeopt ? 1 : 0));
460 __ ReserveAlignedFrameSpace(0); 474 __ ReserveAlignedFrameSpace(0);
461 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 1); 475 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
462 // Result (R0) is stack-size (FP - SP) in bytes. 476 // Result (R0) is stack-size (FP - SP) in bytes.
463 477
478 const bool preserve_result = (kind == kLazyDeopt);
464 if (preserve_result) { 479 if (preserve_result) {
465 // Restore result into R1 temporarily. 480 // Restore result into R1 temporarily.
466 __ ldr(R1, Address(FP, saved_result_slot_from_fp * kWordSize)); 481 __ ldr(R1, Address(FP, saved_result_slot_from_fp * kWordSize));
467 } 482 }
468 483
484 __ RestoreCodePointer();
469 __ LeaveDartFrame(); 485 __ LeaveDartFrame();
470 __ sub(SP, FP, Operand(R0)); 486 __ sub(SP, FP, Operand(R0));
471 487
472 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there 488 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
473 // is no need to set the correct PC marker or load PP, since they get patched. 489 // is no need to set the correct PC marker or load PP, since they get patched.
474 __ EnterStubFrame(); 490 __ EnterStubFrame();
475 __ mov(R0, Operand(FP)); // Get last FP address. 491 __ mov(R0, Operand(FP)); // Get last FP address.
476 if (preserve_result) { 492 if (preserve_result) {
477 __ Push(R1); // Preserve result as first local. 493 __ Push(R1); // Preserve result as first local.
478 } 494 }
479 __ ReserveAlignedFrameSpace(0); 495 __ ReserveAlignedFrameSpace(0);
480 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in R0. 496 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in R0.
481 if (preserve_result) { 497 if (preserve_result) {
482 // Restore result into R1. 498 // Restore result into R1.
483 __ ldr(R1, Address(FP, kFirstLocalSlotFromFp * kWordSize)); 499 __ ldr(R1, Address(FP, kFirstLocalSlotFromFp * kWordSize));
484 } 500 }
485 // Code above cannot cause GC. 501 // Code above cannot cause GC.
502 __ RestoreCodePointer();
486 __ LeaveStubFrame(); 503 __ LeaveStubFrame();
487 504
488 // Frame is fully rewritten at this point and it is safe to perform a GC. 505 // Frame is fully rewritten at this point and it is safe to perform a GC.
489 // Materialize any objects that were deferred by FillFrame because they 506 // Materialize any objects that were deferred by FillFrame because they
490 // require allocation. 507 // require allocation.
491 // Enter stub frame with loading PP. The caller's PP is not materialized yet. 508 // Enter stub frame with loading PP. The caller's PP is not materialized yet.
492 __ EnterStubFrame(); 509 __ EnterStubFrame();
493 if (preserve_result) { 510 if (preserve_result) {
494 __ Push(R1); // Preserve result, it will be GC-d here. 511 __ Push(R1); // Preserve result, it will be GC-d here.
495 } 512 }
496 __ PushObject(Smi::ZoneHandle()); // Space for the result. 513 __ PushObject(Smi::ZoneHandle()); // Space for the result.
497 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0); 514 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
498 // Result tells stub how many bytes to remove from the expression stack 515 // Result tells stub how many bytes to remove from the expression stack
499 // of the bottom-most frame. They were used as materialization arguments. 516 // of the bottom-most frame. They were used as materialization arguments.
500 __ Pop(R1); 517 __ Pop(R1);
501 if (preserve_result) { 518 if (preserve_result) {
502 __ Pop(R0); // Restore result. 519 __ Pop(R0); // Restore result.
503 } 520 }
504 __ LeaveStubFrame(); 521 __ LeaveStubFrame();
505 // Remove materialization arguments. 522 // Remove materialization arguments.
506 __ add(SP, SP, Operand(R1, ASR, kSmiTagSize)); 523 __ add(SP, SP, Operand(R1, ASR, kSmiTagSize));
507 __ Ret(); 524 __ Ret();
508 } 525 }
509 526
510 527
511 void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) { 528 void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
512 // Correct return address to point just after the call that is being 529 // Correct return address to point just after the call that is being
513 // deoptimized. 530 // deoptimized.
514 __ AddImmediate(LR, -CallPattern::LengthInBytes()); 531 __ AddImmediate(LR, -CallPattern::DeoptCallPatternLengthInBytes());
515 GenerateDeoptimizationSequence(assembler, true); // Preserve R0. 532 // Push zap value instead of CODE_REG for lazy deopt.
533 __ LoadImmediate(IP, 0xf1f1f1f1);
534 __ Push(IP);
535 GenerateDeoptimizationSequence(assembler, kLazyDeopt);
516 } 536 }
517 537
518 538
519 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { 539 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
520 GenerateDeoptimizationSequence(assembler, false); // Don't preserve R0. 540 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
521 } 541 }
522 542
523 543
524 static void GenerateDispatcherCode(Assembler* assembler, 544 static void GenerateDispatcherCode(Assembler* assembler,
525 Label* call_target_function) { 545 Label* call_target_function) {
526 __ Comment("NoSuchMethodDispatch"); 546 __ Comment("NoSuchMethodDispatch");
527 // When lazily generated invocation dispatchers are disabled, the 547 // When lazily generated invocation dispatchers are disabled, the
528 // miss-handler may return null. 548 // miss-handler may return null.
529 __ CompareObject(R0, Object::null_object()); 549 __ CompareObject(R0, Object::null_object());
530 __ b(call_target_function, NE); 550 __ b(call_target_function, NE);
(...skipping 35 matching lines...)
566 __ LoadObject(IP, Object::null_object()); 586 __ LoadObject(IP, Object::null_object());
567 __ PushList((1 << R4) | (1 << R5) | (1 << R6) | (1 << IP)); 587 __ PushList((1 << R4) | (1 << R5) | (1 << R6) | (1 << IP));
568 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3); 588 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3);
569 // Remove arguments. 589 // Remove arguments.
570 __ Drop(3); 590 __ Drop(3);
571 __ Pop(R0); // Get result into R0 (target function). 591 __ Pop(R0); // Get result into R0 (target function).
572 592
573 // Restore IC data and arguments descriptor. 593 // Restore IC data and arguments descriptor.
574 __ PopList((1 << R4) | (1 << R5)); 594 __ PopList((1 << R4) | (1 << R5));
575 595
596 __ RestoreCodePointer();
576 __ LeaveStubFrame(); 597 __ LeaveStubFrame();
577 598
578 if (!FLAG_lazy_dispatchers) { 599 if (!FLAG_lazy_dispatchers) {
579 Label call_target_function; 600 Label call_target_function;
580 GenerateDispatcherCode(assembler, &call_target_function); 601 GenerateDispatcherCode(assembler, &call_target_function);
581 __ Bind(&call_target_function); 602 __ Bind(&call_target_function);
582 } 603 }
583 604
584 // Tail-call to target function. 605 // Tail-call to target function.
606 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
585 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); 607 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
586 __ bx(R2); 608 __ bx(R2);
587 } 609 }
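The same convention holds on this miss path: both the target's Code object and its cached entry point are read off the Function, so the jump lands with CODE_REG already correct. In C++ terms (field names inferred from the offsets used above, purely illustrative):

    // Illustrative paraphrase of the two loads feeding bx R2:
    RawCode* code = function->code_;       // Function::code_offset()
    uword entry = function->entry_point_;  // Function::entry_point_offset()
    // branch to entry with CODE_REG == code.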
588 610
589 611
590 // Called for inline allocation of arrays. 612 // Called for inline allocation of arrays.
591 // Input parameters: 613 // Input parameters:
592 // LR: return address. 614 // LR: return address.
593 // R1: array element type (either NULL or an instantiated type). 615 // R1: array element type (either NULL or an instantiated type).
594 // R2: array length as Smi (must be preserved). 616 // R2: array length as Smi (must be preserved).
(...skipping 18 matching lines...)
613 const intptr_t max_len = 635 const intptr_t max_len =
614 reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements)); 636 reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements));
615 __ CompareImmediate(R3, max_len); 637 __ CompareImmediate(R3, max_len);
616 __ b(&slow_case, GT); 638 __ b(&slow_case, GT);
617 639
618 const intptr_t cid = kArrayCid; 640 const intptr_t cid = kArrayCid;
619 __ MaybeTraceAllocation(cid, R4, &slow_case, 641 __ MaybeTraceAllocation(cid, R4, &slow_case,
620 /* inline_isolate = */ false); 642 /* inline_isolate = */ false);
621 643
622 const intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1; 644 const intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1;
623 __ LoadImmediate(R9, fixed_size); 645 __ LoadImmediate(R5, fixed_size);
624 __ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi. 646 __ add(R5, R5, Operand(R3, LSL, 1)); // R3 is a Smi.
625 ASSERT(kSmiTagShift == 1); 647 ASSERT(kSmiTagShift == 1);
626 __ bic(R9, R9, Operand(kObjectAlignment - 1)); 648 __ bic(R5, R5, Operand(kObjectAlignment - 1));
627 649
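The three instructions above compute the rounded-up allocation size. An illustrative C++ equivalent (the helper name is hypothetical; kWordSize == 4 on ARM, and a Smi already carries length << 1, hence the single extra shift):

    intptr_t AllocationSizeSketch(intptr_t length) {
      intptr_t size = sizeof(RawArray) + kObjectAlignment - 1;  // fixed part
      size += length * kWordSize;      // add(R5, R5, Operand(R3, LSL, 1))
      // bic clears the low bits; together with the +(alignment - 1) above
      // this rounds the total up to kObjectAlignment.
      return size & ~(kObjectAlignment - 1);
    }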
628 // R9: Allocation size. 650 // R5: Allocation size.
629 Heap::Space space = Heap::SpaceForAllocation(cid); 651 Heap::Space space = Heap::SpaceForAllocation(cid);
630 __ LoadIsolate(R6); 652 __ LoadIsolate(R6);
631 __ ldr(R6, Address(R6, Isolate::heap_offset())); 653 __ ldr(R6, Address(R6, Isolate::heap_offset()));
632 // Potential new object start. 654 // Potential new object start.
633 __ ldr(R0, Address(R6, Heap::TopOffset(space))); 655 __ ldr(R0, Address(R6, Heap::TopOffset(space)));
634 __ adds(R7, R0, Operand(R9)); // Potential next object start. 656 __ adds(R7, R0, Operand(R5)); // Potential next object start.
635 __ b(&slow_case, CS); // Branch if unsigned overflow. 657 __ b(&slow_case, CS); // Branch if unsigned overflow.
636 658
637 // Check if the allocation fits into the remaining space. 659 // Check if the allocation fits into the remaining space.
638 // R0: potential new object start. 660 // R0: potential new object start.
639 // R7: potential next object start. 661 // R7: potential next object start.
640 // R9: allocation size. 662 // R5: allocation size.
641 __ ldr(R3, Address(R6, Heap::EndOffset(space))); 663 __ ldr(R3, Address(R6, Heap::EndOffset(space)));
642 __ cmp(R7, Operand(R3)); 664 __ cmp(R7, Operand(R3));
643 __ b(&slow_case, CS); 665 __ b(&slow_case, CS);
644 666
645 // Successfully allocated the object(s), now update top to point to 667 // Successfully allocated the object(s), now update top to point to
646 // next object start and initialize the object. 668 // next object start and initialize the object.
647 __ LoadAllocationStatsAddress(R3, cid, /* inline_isolate = */ false); 669 __ LoadAllocationStatsAddress(R3, cid, /* inline_isolate = */ false);
648 __ str(R7, Address(R6, Heap::TopOffset(space))); 670 __ str(R7, Address(R6, Heap::TopOffset(space)));
649 __ add(R0, R0, Operand(kHeapObjectTag)); 671 __ add(R0, R0, Operand(kHeapObjectTag));
650 672
651 // Initialize the tags. 673 // Initialize the tags.
652 // R0: new object start as a tagged pointer. 674 // R0: new object start as a tagged pointer.
653 // R3: allocation stats address. 675 // R3: allocation stats address.
654 // R7: new object end address. 676 // R7: new object end address.
655 // R9: allocation size. 677 // R5: allocation size.
656 { 678 {
657 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; 679 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
658 680
659 __ CompareImmediate(R9, RawObject::SizeTag::kMaxSizeTag); 681 __ CompareImmediate(R5, RawObject::SizeTag::kMaxSizeTag);
660 __ mov(R6, Operand(R9, LSL, shift), LS); 682 __ mov(R6, Operand(R5, LSL, shift), LS);
661 __ mov(R6, Operand(0), HI); 683 __ mov(R6, Operand(0), HI);
662 684
663 // Get the class index and insert it into the tags. 685 // Get the class index and insert it into the tags.
664 // R6: size and bit tags. 686 // R6: size and bit tags.
665 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); 687 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
666 __ orr(R6, R6, Operand(TMP)); 688 __ orr(R6, R6, Operand(TMP));
667 __ str(R6, FieldAddress(R0, Array::tags_offset())); // Store tags. 689 __ str(R6, FieldAddress(R0, Array::tags_offset())); // Store tags.
668 } 690 }
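The two predicated moves store the size into the tag word only when it fits in the size-tag field (the LS path), and zero otherwise (the HI path) so readers fall back to the class for the size. An illustrative expansion:

    uword tags = (size <= RawObject::SizeTag::kMaxSizeTag)
        ? (size << (RawObject::kSizeTagPos - kObjectAlignmentLog2))  // LS
        : 0;                                                         // HI
    tags |= RawObject::ClassIdTag::encode(cid);  // then add the class id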
669 691
670 // R0: new object start as a tagged pointer. 692 // R0: new object start as a tagged pointer.
671 // R7: new object end address. 693 // R7: new object end address.
672 // Store the type argument field. 694 // Store the type argument field.
673 __ InitializeFieldNoBarrier(R0, 695 __ InitializeFieldNoBarrier(R0,
674 FieldAddress(R0, Array::type_arguments_offset()), 696 FieldAddress(R0, Array::type_arguments_offset()),
675 R1); 697 R1);
676 698
677 // Set the length field. 699 // Set the length field.
678 __ InitializeFieldNoBarrier(R0, 700 __ InitializeFieldNoBarrier(R0,
679 FieldAddress(R0, Array::length_offset()), 701 FieldAddress(R0, Array::length_offset()),
680 R2); 702 R2);
681 703
682 // Initialize all array elements to raw_null. 704 // Initialize all array elements to raw_null.
683 // R0: new object start as a tagged pointer. 705 // R0: new object start as a tagged pointer.
684 // R3: allocation stats address. 706 // R3: allocation stats address.
685 // R4, R5: null 707 // R4, R5: null
686 // R6: iterator which initially points to the start of the variable 708 // R6: iterator which initially points to the start of the variable
687 // data area to be initialized. 709 // data area to be initialized.
688 // R7: new object end address. 710 // R7: new object end address.
689 // R9: allocation size. 711 // R5: allocation size.
712 __ IncrementAllocationStatsWithSize(R3, R5, space);
690 713
691 __ LoadObject(R4, Object::null_object()); 714 __ LoadObject(R4, Object::null_object());
692 __ mov(R5, Operand(R4)); 715 __ mov(R5, Operand(R4));
693 __ AddImmediate(R6, R0, sizeof(RawArray) - kHeapObjectTag); 716 __ AddImmediate(R6, R0, sizeof(RawArray) - kHeapObjectTag);
694 __ InitializeFieldsNoBarrier(R0, R6, R7, R4, R5); 717 __ InitializeFieldsNoBarrier(R0, R6, R7, R4, R5);
695 __ IncrementAllocationStatsWithSize(R3, R9, space);
696 __ Ret(); // Returns the newly allocated object in R0. 718 __ Ret(); // Returns the newly allocated object in R0.
697 // Unable to allocate the array using the fast inline code, just call 719 // Unable to allocate the array using the fast inline code, just call
698 // into the runtime. 720 // into the runtime.
699 __ Bind(&slow_case); 721 __ Bind(&slow_case);
700 722
701 // Create a stub frame as we are pushing some objects on the stack before 723 // Create a stub frame as we are pushing some objects on the stack before
702 // calling into the runtime. 724 // calling into the runtime.
703 __ EnterStubFrame(); 725 __ EnterStubFrame();
704 __ LoadObject(IP, Object::null_object()); 726 __ LoadObject(IP, Object::null_object());
705 // Setup space on stack for return value. 727 // Setup space on stack for return value.
706 // Push array length as Smi and element type. 728 // Push array length as Smi and element type.
707 __ PushList((1 << R1) | (1 << R2) | (1 << IP)); 729 __ PushList((1 << R1) | (1 << R2) | (1 << IP));
708 __ CallRuntime(kAllocateArrayRuntimeEntry, 2); 730 __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
709 // Pop arguments; result is popped in IP. 731 // Pop arguments; result is popped in IP.
710 __ PopList((1 << R1) | (1 << R2) | (1 << IP)); // R2 is restored. 732 __ PopList((1 << R1) | (1 << R2) | (1 << IP)); // R2 is restored.
711 __ mov(R0, Operand(IP)); 733 __ mov(R0, Operand(IP));
712 __ LeaveStubFrame(); 734 __ LeaveStubFrame();
713 __ Ret(); 735 __ Ret();
714 } 736 }
715 737
716 738
717 // Called when invoking Dart code from C++ (VM code). 739 // Called when invoking Dart code from C++ (VM code).
718 // Input parameters: 740 // Input parameters:
719 // LR : points to return address. 741 // LR : points to return address.
720 // R0 : entrypoint of the Dart function to call. 742 // R0 : code object of the Dart function to call.
721 // R1 : arguments descriptor array. 743 // R1 : arguments descriptor array.
722 // R2 : arguments array. 744 // R2 : arguments array.
723 // R3 : current thread. 745 // R3 : current thread.
724 void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) { 746 void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
725 // Save frame pointer coming in. 747 // Save frame pointer coming in.
726 __ EnterFrame((1 << FP) | (1 << LR), 0); 748 __ EnterFrame((1 << FP) | (1 << LR), 0);
727 749
728 // Save new context and C++ ABI callee-saved registers. 750 // Save new context and C++ ABI callee-saved registers.
729 __ PushList(kAbiPreservedCpuRegs); 751 __ PushList(kAbiPreservedCpuRegs);
730 752
731 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg); 753 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg);
732 if (TargetCPUFeatures::vfp_supported()) { 754 if (TargetCPUFeatures::vfp_supported()) {
733 ASSERT(2 * kAbiPreservedFpuRegCount < 16); 755 ASSERT(2 * kAbiPreservedFpuRegCount < 16);
734 // Save FPU registers. 2 D registers per Q register. 756 // Save FPU registers. 2 D registers per Q register.
735 __ vstmd(DB_W, SP, firstd, 2 * kAbiPreservedFpuRegCount); 757 __ vstmd(DB_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
736 } else { 758 } else {
737 __ sub(SP, SP, Operand(kAbiPreservedFpuRegCount * kFpuRegisterSize)); 759 __ sub(SP, SP, Operand(kAbiPreservedFpuRegCount * kFpuRegisterSize));
738 } 760 }
739 761
740 // We now load the pool pointer(PP) as we are about to invoke dart code and we
741 // could potentially invoke some intrinsic functions which need the PP to be
742 // set up.
743 __ LoadPoolPointer();
744
745 // Set up THR, which caches the current thread in Dart code. 762 // Set up THR, which caches the current thread in Dart code.
746 if (THR != R3) { 763 if (THR != R3) {
747 __ mov(THR, Operand(R3)); 764 __ mov(THR, Operand(R3));
748 } 765 }
749 __ LoadIsolate(R9); 766 __ LoadIsolate(R7);
750 767
751 // Save the current VMTag on the stack. 768 // Save the current VMTag on the stack.
752 __ LoadFromOffset(kWord, R5, R9, Isolate::vm_tag_offset()); 769 __ LoadFromOffset(kWord, R5, R7, Isolate::vm_tag_offset());
753 __ Push(R5); 770 __ Push(R5);
754 771
755 // Mark that the isolate is executing Dart code. 772 // Mark that the isolate is executing Dart code.
756 __ LoadImmediate(R5, VMTag::kDartTagId); 773 __ LoadImmediate(R5, VMTag::kDartTagId);
757 __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset()); 774 __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset());
758 775
759 // Save top resource and top exit frame info. Use R4-6 as temporary registers. 776 // Save top resource and top exit frame info. Use R4-6 as temporary registers.
760 // StackFrameIterator reads the top exit frame info saved in this frame. 777 // StackFrameIterator reads the top exit frame info saved in this frame.
761 __ LoadFromOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset()); 778 __ LoadFromOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset());
762 __ LoadFromOffset(kWord, R4, THR, Thread::top_resource_offset()); 779 __ LoadFromOffset(kWord, R4, THR, Thread::top_resource_offset());
763 __ LoadImmediate(R6, 0); 780 __ LoadImmediate(R6, 0);
764 __ StoreToOffset(kWord, R6, THR, Thread::top_resource_offset()); 781 __ StoreToOffset(kWord, R6, THR, Thread::top_resource_offset());
765 __ StoreToOffset(kWord, R6, THR, Thread::top_exit_frame_info_offset()); 782 __ StoreToOffset(kWord, R6, THR, Thread::top_exit_frame_info_offset());
766 783
767 // kExitLinkSlotFromEntryFp must be kept in sync with the code below. 784 // kExitLinkSlotFromEntryFp must be kept in sync with the code below.
(...skipping 21 matching lines...)
789 __ Bind(&push_arguments); 806 __ Bind(&push_arguments);
790 __ ldr(R3, Address(R2)); 807 __ ldr(R3, Address(R2));
791 __ Push(R3); 808 __ Push(R3);
792 __ AddImmediate(R2, kWordSize); 809 __ AddImmediate(R2, kWordSize);
793 __ AddImmediate(R1, 1); 810 __ AddImmediate(R1, 1);
794 __ cmp(R1, Operand(R5)); 811 __ cmp(R1, Operand(R5));
795 __ b(&push_arguments, LT); 812 __ b(&push_arguments, LT);
796 __ Bind(&done_push_arguments); 813 __ Bind(&done_push_arguments);
797 814
798 // Call the Dart code entrypoint. 815 // Call the Dart code entrypoint.
816 __ LoadImmediate(PP, 0); // GC safe value into PP.
817 __ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle));
818 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
799 __ blx(R0); // R4 is the arguments descriptor array. 819 __ blx(R0); // R4 is the arguments descriptor array.
800 820
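These two loads are where the CL's headline change lands for C++-to-Dart entry: R0 now carries a VM handle to the Code object rather than a raw entry point. In C++ terms (types and field name illustrative):

    // A VM handle is a pointer to the raw object pointer:
    RawCode* code = *reinterpret_cast<RawCode**>(r0);  // -> CODE_REG
    uword entry = code->entry_point_;                  // -> R0, then blx R0
    // Dart code is entered with CODE_REG holding its own Code object and
    // with PP zeroed to a GC-safe value until the callee reloads it.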
801 // Get rid of arguments pushed on the stack. 821 // Get rid of arguments pushed on the stack.
802 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize); 822 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize);
803 823
804 __ LoadIsolate(R9); 824 __ LoadIsolate(R7);
805 // Restore the saved top exit frame info and top resource back into the 825 // Restore the saved top exit frame info and top resource back into the
806 // Isolate structure. Uses R5 as a temporary register for this. 826 // Isolate structure. Uses R5 as a temporary register for this.
807 __ Pop(R5); 827 __ Pop(R5);
808 __ StoreToOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset()); 828 __ StoreToOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset());
809 __ Pop(R5); 829 __ Pop(R5);
810 __ StoreToOffset(kWord, R5, THR, Thread::top_resource_offset()); 830 __ StoreToOffset(kWord, R5, THR, Thread::top_resource_offset());
811 831
812 // Restore the current VMTag from the stack. 832 // Restore the current VMTag from the stack.
813 __ Pop(R4); 833 __ Pop(R4);
814 __ StoreToOffset(kWord, R4, R9, Isolate::vm_tag_offset()); 834 __ StoreToOffset(kWord, R4, R7, Isolate::vm_tag_offset());
815 835
816 // Restore C++ ABI callee-saved registers. 836 // Restore C++ ABI callee-saved registers.
817 if (TargetCPUFeatures::vfp_supported()) { 837 if (TargetCPUFeatures::vfp_supported()) {
818 // Restore FPU registers. 2 D registers per Q register. 838 // Restore FPU registers. 2 D registers per Q register.
819 __ vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount); 839 __ vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
820 } else { 840 } else {
821 __ AddImmediate(SP, kAbiPreservedFpuRegCount * kFpuRegisterSize); 841 __ AddImmediate(SP, kAbiPreservedFpuRegCount * kFpuRegisterSize);
822 } 842 }
823 // Restore CPU registers. 843 // Restore CPU registers.
824 __ PopList(kAbiPreservedCpuRegs); 844 __ PopList(kAbiPreservedCpuRegs);
(...skipping 198 matching lines...)
1023 // Restore callee-saved registers, tear down frame. 1043 // Restore callee-saved registers, tear down frame.
1024 __ LeaveCallRuntimeFrame(); 1044 __ LeaveCallRuntimeFrame();
1025 __ Ret(); 1045 __ Ret();
1026 } 1046 }
1027 1047
1028 1048
1029 // Called for inline allocation of objects. 1049 // Called for inline allocation of objects.
1030 // Input parameters: 1050 // Input parameters:
1031 // LR : return address. 1051 // LR : return address.
1032 // SP + 0 : type arguments object (only if class is parameterized). 1052 // SP + 0 : type arguments object (only if class is parameterized).
1033 // Returns patch_code_pc offset where patching code for disabling the stub 1053 void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
1034 // has been generated (similar to regularly generated Dart code). 1054 const Class& cls) {
1035 void StubCode::GenerateAllocationStubForClass( 1055 // Must load pool pointer before being able to patch.
1036 Assembler* assembler, const Class& cls, 1056 Register new_pp = R7;
1037 uword* entry_patch_offset, uword* patch_code_pc_offset) { 1057 __ LoadPoolPointer(new_pp);
1038 *entry_patch_offset = assembler->CodeSize();
1039 // The generated code is different if the class is parameterized. 1058 // The generated code is different if the class is parameterized.
1040 const bool is_cls_parameterized = cls.NumTypeArguments() > 0; 1059 const bool is_cls_parameterized = cls.NumTypeArguments() > 0;
1041 ASSERT(!is_cls_parameterized || 1060 ASSERT(!is_cls_parameterized ||
1042 (cls.type_arguments_field_offset() != Class::kNoTypeArguments)); 1061 (cls.type_arguments_field_offset() != Class::kNoTypeArguments));
1043 // kInlineInstanceSize is a constant used as a threshold for determining 1062 // kInlineInstanceSize is a constant used as a threshold for determining
1044 // when the object initialization should be done as a loop or as 1063 // when the object initialization should be done as a loop or as
1045 // straight line code. 1064 // straight line code.
1046 const int kInlineInstanceSize = 12; 1065 const int kInlineInstanceSize = 12;
1047 const intptr_t instance_size = cls.instance_size(); 1066 const intptr_t instance_size = cls.instance_size();
1048 ASSERT(instance_size > 0); 1067 ASSERT(instance_size > 0);
(...skipping 105 matching lines...)
1154 // Push null type arguments. 1173 // Push null type arguments.
1155 __ Push(R2); 1174 __ Push(R2);
1156 } 1175 }
1157 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object. 1176 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object.
1158 __ Drop(2); // Pop arguments. 1177 __ Drop(2); // Pop arguments.
1159 __ Pop(R0); // Pop result (newly allocated object). 1178 __ Pop(R0); // Pop result (newly allocated object).
1160 // R0: new object 1179 // R0: new object
1161 // Restore the frame pointer. 1180 // Restore the frame pointer.
1162 __ LeaveStubFrame(); 1181 __ LeaveStubFrame();
1163 __ Ret(); 1182 __ Ret();
1164 *patch_code_pc_offset = assembler->CodeSize();
1165 __ BranchPatchable(*StubCode::FixAllocationStubTarget_entry());
1166 } 1183 }
1167 1184
1168 1185
1169 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function 1186 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
1170 // from the entry code of a dart function after an error in passed argument 1187 // from the entry code of a dart function after an error in passed argument
1171 // name or number is detected. 1188 // name or number is detected.
1172 // Input parameters: 1189 // Input parameters:
1173 // LR : return address. 1190 // LR : return address.
1174 // SP : address of last argument. 1191 // SP : address of last argument.
1175 // R4: arguments descriptor array. 1192 // R4: arguments descriptor array.
(...skipping 134 matching lines...)
1310 // - Check if 'num_args' (including receiver) match any IC data group. 1327 // - Check if 'num_args' (including receiver) match any IC data group.
1311 // - Match found -> jump to target. 1328 // - Match found -> jump to target.
1312 // - Match not found -> jump to IC miss. 1329 // - Match not found -> jump to IC miss.
1313 void StubCode::GenerateNArgsCheckInlineCacheStub( 1330 void StubCode::GenerateNArgsCheckInlineCacheStub(
1314 Assembler* assembler, 1331 Assembler* assembler,
1315 intptr_t num_args, 1332 intptr_t num_args,
1316 const RuntimeEntry& handle_ic_miss, 1333 const RuntimeEntry& handle_ic_miss,
1317 Token::Kind kind, 1334 Token::Kind kind,
1318 RangeCollectionMode range_collection_mode, 1335 RangeCollectionMode range_collection_mode,
1319 bool optimized) { 1336 bool optimized) {
1337 __ CheckCodePointer();
1320 ASSERT(num_args > 0); 1338 ASSERT(num_args > 0);
1321 #if defined(DEBUG) 1339 #if defined(DEBUG)
1322 { Label ok; 1340 { Label ok;
1323 // Check that the IC data array has NumArgsTested() == num_args. 1341 // Check that the IC data array has NumArgsTested() == num_args.
1324 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. 1342 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
1325 __ ldr(R6, FieldAddress(R5, ICData::state_bits_offset())); 1343 __ ldr(R6, FieldAddress(R5, ICData::state_bits_offset()));
1326 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. 1344 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
1327 __ and_(R6, R6, Operand(ICData::NumArgsTestedMask())); 1345 __ and_(R6, R6, Operand(ICData::NumArgsTestedMask()));
1328 __ CompareImmediate(R6, num_args); 1346 __ CompareImmediate(R6, num_args);
1329 __ b(&ok, EQ); 1347 __ b(&ok, EQ);
(...skipping 104 matching lines...)
1434 __ Push(IP); 1452 __ Push(IP);
1435 } 1453 }
1436 // Pass IC data object. 1454 // Pass IC data object.
1437 __ Push(R5); 1455 __ Push(R5);
1438 __ CallRuntime(handle_ic_miss, num_args + 1); 1456 __ CallRuntime(handle_ic_miss, num_args + 1);
1439 // Remove the call arguments pushed earlier, including the IC data object. 1457 // Remove the call arguments pushed earlier, including the IC data object.
1440 __ Drop(num_args + 1); 1458 __ Drop(num_args + 1);
1441 // Pop returned function object into R0. 1459 // Pop returned function object into R0.
1442 // Restore arguments descriptor array and IC data array. 1460 // Restore arguments descriptor array and IC data array.
1443 __ PopList((1 << R0) | (1 << R4) | (1 << R5)); 1461 __ PopList((1 << R0) | (1 << R4) | (1 << R5));
1462 if (range_collection_mode == kCollectRanges) {
1463 __ RestoreCodePointer();
1464 }
1444 __ LeaveStubFrame(); 1465 __ LeaveStubFrame();
1445 Label call_target_function; 1466 Label call_target_function;
1446 if (!FLAG_lazy_dispatchers) { 1467 if (!FLAG_lazy_dispatchers) {
1447 GenerateDispatcherCode(assembler, &call_target_function); 1468 GenerateDispatcherCode(assembler, &call_target_function);
1448 } else { 1469 } else {
1449 __ b(&call_target_function); 1470 __ b(&call_target_function);
1450 } 1471 }
1451 1472
1452 __ Bind(&found); 1473 __ Bind(&found);
1453 // R6: pointer to an IC data check group. 1474 // R6: pointer to an IC data check group.
(...skipping 17 matching lines...)
1471 __ ldr(R1, Address(SP, 0 * kWordSize)); 1492 __ ldr(R1, Address(SP, 0 * kWordSize));
1472 if (num_args == 2) { 1493 if (num_args == 2) {
1473 __ ldr(R3, Address(SP, 1 * kWordSize)); 1494 __ ldr(R3, Address(SP, 1 * kWordSize));
1474 } 1495 }
1475 __ EnterStubFrame(); 1496 __ EnterStubFrame();
1476 if (num_args == 2) { 1497 if (num_args == 2) {
1477 __ PushList((1 << R1) | (1 << R3) | (1 << R5)); 1498 __ PushList((1 << R1) | (1 << R3) | (1 << R5));
1478 } else { 1499 } else {
1479 __ PushList((1 << R1) | (1 << R5)); 1500 __ PushList((1 << R1) | (1 << R5));
1480 } 1501 }
1502 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
1481 __ blx(R2); 1503 __ blx(R2);
1482 1504
1483 Label done; 1505 Label done;
1484 __ ldr(R5, Address(FP, kFirstLocalSlotFromFp * kWordSize)); 1506 __ ldr(R5, Address(FP, kFirstLocalSlotFromFp * kWordSize));
1485 __ UpdateRangeFeedback(R0, 2, R5, R1, R4, &done); 1507 __ UpdateRangeFeedback(R0, 2, R5, R1, R4, &done);
1486 __ Bind(&done); 1508 __ Bind(&done);
1509 __ RestoreCodePointer();
1487 __ LeaveStubFrame(); 1510 __ LeaveStubFrame();
1488 __ Ret(); 1511 __ Ret();
1489 } else { 1512 } else {
1513 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
1490 __ bx(R2); 1514 __ bx(R2);
1491 } 1515 }
1492 1516
1493 if (FLAG_support_debugger && !optimized) { 1517 if (FLAG_support_debugger && !optimized) {
1494 __ Bind(&stepping); 1518 __ Bind(&stepping);
1495 __ EnterStubFrame(); 1519 __ EnterStubFrame();
1496 __ Push(R5); // Preserve IC data. 1520 __ Push(R5); // Preserve IC data.
1497 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); 1521 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
1498 __ Pop(R5); 1522 __ Pop(R5);
1523 __ RestoreCodePointer();
1499 __ LeaveStubFrame(); 1524 __ LeaveStubFrame();
1500 __ b(&done_stepping); 1525 __ b(&done_stepping);
1501 } 1526 }
1502 } 1527 }
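All of the additions in this stub serve one invariant: CODE_REG must name the Code object of whatever runs next. CheckCodePointer() evidently asserts that invariant on entry, and every transfer to a target function now takes the three-instruction form that appears verbatim in the hunks above:

    // Tail-call a Function under the new convention:
    __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
    __ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
    __ bx(R2);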
1503 1528
1504 1529
1505 // Use inline cache data array to invoke the target or continue in inline 1530 // Use inline cache data array to invoke the target or continue in inline
1506 // cache miss handler. Stub for 1-argument check (receiver class). 1531 // cache miss handler. Stub for 1-argument check (receiver class).
1507 // LR: return address. 1532 // LR: return address.
1508 // R5: inline cache data object. 1533 // R5: inline cache data object.
(...skipping 130 matching lines...)
1639 __ adds(R1, R1, Operand(Smi::RawValue(1))); 1664 __ adds(R1, R1, Operand(Smi::RawValue(1)));
1640 __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), VS); // Overflow. 1665 __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), VS); // Overflow.
1641 __ StoreIntoSmiField(Address(R6, count_offset), R1); 1666 __ StoreIntoSmiField(Address(R6, count_offset), R1);
1642 } 1667 }
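The increment above is a branch-free saturation idiom: adds sets the overflow flag, and the VS-predicated LoadImmediate replaces the result with Smi::kMaxValue only when the addition overflowed. In plain C++ terms (a sketch; raw_count stands for the raw Smi bits loaded from the IC entry):

    // Saturating increment of a raw Smi counter (illustrative):
    int32_t raw;
    if (__builtin_add_overflow(raw_count, Smi::RawValue(1), &raw)) {
      raw = Smi::RawValue(Smi::kMaxValue);  // clamp instead of wrapping
    }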
1643 1668
1644 // Load arguments descriptor into R4. 1669 // Load arguments descriptor into R4.
1645 __ ldr(R4, FieldAddress(R5, ICData::arguments_descriptor_offset())); 1670 __ ldr(R4, FieldAddress(R5, ICData::arguments_descriptor_offset()));
1646 1671
1647 // Get function and call it, if possible. 1672 // Get function and call it, if possible.
1648 __ LoadFromOffset(kWord, R0, R6, target_offset); 1673 __ LoadFromOffset(kWord, R0, R6, target_offset);
1674 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
1649 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); 1675 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
1650 __ bx(R2); 1676 __ bx(R2);
1651 1677
1652 if (FLAG_support_debugger) { 1678 if (FLAG_support_debugger) {
1653 __ Bind(&stepping); 1679 __ Bind(&stepping);
1654 __ EnterStubFrame(); 1680 __ EnterStubFrame();
1655 __ Push(R5); // Preserve IC data. 1681 __ Push(R5); // Preserve IC data.
1656 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); 1682 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
1657 __ Pop(R5); 1683 __ Pop(R5);
1684 __ RestoreCodePointer();
1658 __ LeaveStubFrame(); 1685 __ LeaveStubFrame();
1659 __ b(&done_stepping); 1686 __ b(&done_stepping);
1660 } 1687 }
1661 } 1688 }
1662 1689
1663 1690
1664 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) { 1691 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) {
1665 GenerateUsageCounterIncrement(assembler, R6); 1692 GenerateUsageCounterIncrement(assembler, R6);
1666 GenerateNArgsCheckInlineCacheStub( 1693 GenerateNArgsCheckInlineCacheStub(
1667 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, 1694 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
(...skipping 16 matching lines...)
1684 void StubCode::GenerateLazyCompileStub(Assembler* assembler) { 1711 void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
1685 // Preserve arg desc. and IC data object. 1712 // Preserve arg desc. and IC data object.
1686 __ EnterStubFrame(); 1713 __ EnterStubFrame();
1687 __ PushList((1 << R4) | (1 << R5)); 1714 __ PushList((1 << R4) | (1 << R5));
1688 __ Push(R0); // Pass function. 1715 __ Push(R0); // Pass function.
1689 __ CallRuntime(kCompileFunctionRuntimeEntry, 1); 1716 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
1690 __ Pop(R0); // Restore argument. 1717 __ Pop(R0); // Restore argument.
1691 __ PopList((1 << R4) | (1 << R5)); // Restore arg desc. and IC data. 1718 __ PopList((1 << R4) | (1 << R5)); // Restore arg desc. and IC data.
1692 __ LeaveStubFrame(); 1719 __ LeaveStubFrame();
1693 1720
1721 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
1694 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); 1722 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
1695 __ bx(R2); 1723 __ bx(R2);
1696 } 1724 }
1697 1725
1698 1726
1699 // R5: Contains an ICData. 1727 // R5: Contains an ICData.
1700 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) { 1728 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
1701 __ EnterStubFrame(); 1729 __ EnterStubFrame();
1702 __ LoadObject(R0, Object::null_object()); 1730 __ LoadObject(R0, Object::null_object());
1703 // Preserve arguments descriptor and make room for result. 1731 // Preserve arguments descriptor and make room for result.
1704 __ PushList((1 << R0) | (1 << R5)); 1732 __ PushList((1 << R0) | (1 << R5));
1705 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); 1733 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
1706 __ PopList((1 << R0) | (1 << R5)); 1734 __ PopList((1 << R0) | (1 << R5));
1707 __ LeaveStubFrame(); 1735 __ LeaveStubFrame();
1736 __ mov(CODE_REG, Operand(R0));
1737 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
1708 __ bx(R0); 1738 __ bx(R0);
1709 } 1739 }
1710 1740
1711 1741
1712 void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) { 1742 void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
1713 __ EnterStubFrame(); 1743 __ EnterStubFrame();
1714 __ LoadObject(R0, Object::null_object()); 1744 __ LoadObject(R0, Object::null_object());
1715 // Make room for result. 1745 // Make room for result.
1716 __ PushList((1 << R0)); 1746 __ PushList((1 << R0));
1717 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); 1747 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
1718 __ PopList((1 << R0)); 1748 __ PopList((1 << CODE_REG));
1719 __ LeaveStubFrame(); 1749 __ LeaveStubFrame();
1750 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
1720 __ bx(R0); 1751 __ bx(R0);
1721 } 1752 }
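Both breakpoint stubs obey the same invariant. The runtime handler returns the original Code object that the breakpoint displaced, and the stub must publish it in CODE_REG before jumping: the IC variant copies R0 into CODE_REG explicitly, while the runtime-call variant now pops the result straight into CODE_REG. The resume pattern, verbatim from the first stub:

    // Resume at the displaced original code:
    __ mov(CODE_REG, Operand(R0));
    __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
    __ bx(R0);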
1722 1753
1723 1754
1724 // Called only from unoptimized code. All relevant registers have been saved. 1755 // Called only from unoptimized code. All relevant registers have been saved.
1725 void StubCode::GenerateDebugStepCheckStub( 1756 void StubCode::GenerateDebugStepCheckStub(
1726 Assembler* assembler) { 1757 Assembler* assembler) {
1727 // Check single stepping. 1758 // Check single stepping.
1728 Label stepping, done_stepping; 1759 Label stepping, done_stepping;
1729 __ LoadIsolate(R1); 1760 __ LoadIsolate(R1);
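The elided body below reads the isolate's single-stepping flag and branches to the slow path. Judging from the stepping checks visible elsewhere in this file, its shape is likely (a sketch; the exact offset accessor is an assumption):

    __ ldrb(R1, Address(R1, Isolate::single_step_offset()));  // assumed accessor
    __ CompareImmediate(R1, 0);
    __ b(&stepping, NE);
    __ Bind(&done_stepping);
    __ Ret();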
(...skipping 159 matching lines...)
1889 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) { 1920 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
1890 __ EnterStubFrame(); 1921 __ EnterStubFrame();
1891 __ Push(R4); 1922 __ Push(R4);
1892 __ LoadObject(IP, Object::null_object()); 1923 __ LoadObject(IP, Object::null_object());
1893 __ Push(IP); // Setup space on stack for return value. 1924 __ Push(IP); // Setup space on stack for return value.
1894 __ Push(R6); 1925 __ Push(R6);
1895 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1); 1926 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
1896 __ Pop(R0); // Discard argument. 1927 __ Pop(R0); // Discard argument.
1897 __ Pop(R0); // Get Code object 1928 __ Pop(R0); // Get Code object
1898 __ Pop(R4); // Restore argument descriptor. 1929 __ Pop(R4); // Restore argument descriptor.
1930 __ LeaveStubFrame();
1931 __ mov(CODE_REG, Operand(R0));
1899 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); 1932 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
1900 __ LeaveStubFrame();
1901 __ bx(R0); 1933 __ bx(R0);
1902 __ bkpt(0); 1934 __ bkpt(0);
1903 } 1935 }
1904 1936
1905 1937
1906 // Does identical check (object references are equal or not equal) with special 1938 // Does identical check (object references are equal or not equal) with special
1907 // checks for boxed numbers. 1939 // checks for boxed numbers.
1908 // LR: return address. 1940 // LR: return address.
1909 // Return Zero condition flag set if equal. 1941 // Return Zero condition flag set if equal.
1910 // Note: A Mint cannot contain a value that would fit in Smi, a Bigint 1942 // Note: A Mint cannot contain a value that would fit in Smi, a Bigint
(...skipping 81 matching lines...)
1992 const Register right = R0; 2024 const Register right = R0;
1993 __ ldr(left, Address(SP, 1 * kWordSize)); 2025 __ ldr(left, Address(SP, 1 * kWordSize));
1994 __ ldr(right, Address(SP, 0 * kWordSize)); 2026 __ ldr(right, Address(SP, 0 * kWordSize));
1995 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp); 2027 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
1996 __ Ret(); 2028 __ Ret();
1997 2029
1998 if (FLAG_support_debugger) { 2030 if (FLAG_support_debugger) {
1999 __ Bind(&stepping); 2031 __ Bind(&stepping);
2000 __ EnterStubFrame(); 2032 __ EnterStubFrame();
2001 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); 2033 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2034 __ RestoreCodePointer();
2002 __ LeaveStubFrame(); 2035 __ LeaveStubFrame();
2003 __ b(&done_stepping); 2036 __ b(&done_stepping);
2004 } 2037 }
2005 } 2038 }
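One more instance of the CODE_REG discipline, easy to miss in review: a runtime call from inside a stub frame cannot be assumed to preserve CODE_REG, so each one is now followed by RestoreCodePointer(), which re-derives the register from the frame before teardown. Verbatim from the hunk above:

    __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
    __ RestoreCodePointer();
    __ LeaveStubFrame();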
2006 2039
2007 2040
2008 // Called from optimized code only. 2041 // Called from optimized code only.
2009 // LR: return address. 2042 // LR: return address.
2010 // SP + 4: left operand. 2043 // SP + 4: left operand.
2011 // SP + 0: right operand. 2044 // SP + 0: right operand.
(...skipping 39 matching lines...)
2051 __ cmp(R4, Operand(R0)); 2084 __ cmp(R4, Operand(R0));
2052 __ b(&update, NE); 2085 __ b(&update, NE);
2053 2086
2054 __ Bind(&call_target_function); 2087 __ Bind(&call_target_function);
2055 // Call the target found in the cache. For a class id match, this is a 2088 // Call the target found in the cache. For a class id match, this is a
2056 // proper target for the given name and arguments descriptor. If the 2089 // proper target for the given name and arguments descriptor. If the
2057 // illegal class id was found, the target is a cache miss handler that can 2090 // illegal class id was found, the target is a cache miss handler that can
2058 // be invoked as a normal Dart function. 2091 // be invoked as a normal Dart function.
2059 __ add(IP, R2, Operand(R3, LSL, 2)); 2092 __ add(IP, R2, Operand(R3, LSL, 2));
2060 __ ldr(R0, FieldAddress(IP, base + kWordSize)); 2093 __ ldr(R0, FieldAddress(IP, base + kWordSize));
2094 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
2061 __ ldr(target, FieldAddress(R0, Function::entry_point_offset())); 2095 __ ldr(target, FieldAddress(R0, Function::entry_point_offset()));
2062 } 2096 }
2063 2097
2064 2098
2065 // Called from megamorphic calls. 2099 // Called from megamorphic calls.
2066 // R0: receiver. 2100 // R0: receiver.
2067 // R1: lookup cache. 2101 // R1: lookup cache.
2068 // Result: 2102 // Result:
2069 // R1: entry point. 2103 // R1: entry point.
2070 void StubCode::GenerateMegamorphicLookupStub(Assembler* assembler) { 2104 void StubCode::GenerateMegamorphicLookupStub(Assembler* assembler) {
2071 EmitMegamorphicLookup(assembler, R0, R1, R1); 2105 EmitMegamorphicLookup(assembler, R0, R1, R1);
2072 __ Ret(); 2106 __ Ret();
2073 } 2107 }
2074 2108
2075 } // namespace dart 2109 } // namespace dart
2076 2110
2077 #endif // defined TARGET_ARCH_ARM 2111 #endif // defined TARGET_ARCH_ARM