OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" | 5 #include "vm/globals.h" |
6 #if defined(TARGET_ARCH_MIPS) | 6 #if defined(TARGET_ARCH_MIPS) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/code_generator.h" | 9 #include "vm/code_generator.h" |
10 #include "vm/compiler.h" | 10 #include "vm/compiler.h" |
(...skipping 23 matching lines...) Expand all Loading... |
34 // SP : address of last argument in argument array. | 34 // SP : address of last argument in argument array. |
35 // SP + 4*S4 - 4 : address of first argument in argument array. | 35 // SP + 4*S4 - 4 : address of first argument in argument array. |
36 // SP + 4*S4 : address of return value. | 36 // SP + 4*S4 : address of return value. |
37 // S5 : address of the runtime function to call. | 37 // S5 : address of the runtime function to call. |
38 // S4 : number of arguments to the call. | 38 // S4 : number of arguments to the call. |
39 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { | 39 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { |
40 const intptr_t thread_offset = NativeArguments::thread_offset(); | 40 const intptr_t thread_offset = NativeArguments::thread_offset(); |
41 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); | 41 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); |
42 const intptr_t argv_offset = NativeArguments::argv_offset(); | 42 const intptr_t argv_offset = NativeArguments::argv_offset(); |
43 const intptr_t retval_offset = NativeArguments::retval_offset(); | 43 const intptr_t retval_offset = NativeArguments::retval_offset(); |
44 const intptr_t exitframe_last_param_slot_from_fp = 2; | |
45 | 44 |
46 __ SetPrologueOffset(); | 45 __ SetPrologueOffset(); |
47 __ Comment("CallToRuntimeStub"); | 46 __ Comment("CallToRuntimeStub"); |
48 __ EnterStubFrame(); | 47 __ EnterStubFrame(); |
49 | 48 |
50 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << S6)) != 0); | 49 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << S2)) != 0); |
51 __ LoadIsolate(S6); | 50 __ LoadIsolate(S2); |
52 | 51 |
53 // Save exit frame information to enable stack walking as we are about | 52 // Save exit frame information to enable stack walking as we are about |
54 // to transition to Dart VM C++ code. | 53 // to transition to Dart VM C++ code. |
55 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); | 54 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); |
56 | 55 |
57 #if defined(DEBUG) | 56 #if defined(DEBUG) |
58 { Label ok; | 57 { Label ok; |
59 // Check that we are always entering from Dart code. | 58 // Check that we are always entering from Dart code. |
60 __ lw(T0, Address(S6, Isolate::vm_tag_offset())); | 59 __ lw(T0, Address(S2, Isolate::vm_tag_offset())); |
61 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); | 60 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); |
62 __ Stop("Not coming from Dart code."); | 61 __ Stop("Not coming from Dart code."); |
63 __ Bind(&ok); | 62 __ Bind(&ok); |
64 } | 63 } |
65 #endif | 64 #endif |
66 | 65 |
67 // Mark that the isolate is executing VM code. | 66 // Mark that the isolate is executing VM code. |
68 __ sw(S5, Address(S6, Isolate::vm_tag_offset())); | 67 __ sw(S5, Address(S2, Isolate::vm_tag_offset())); |
69 | 68 |
70 // Reserve space for arguments and align frame before entering C++ world. | 69 // Reserve space for arguments and align frame before entering C++ world. |
71 // NativeArguments are passed in registers. | 70 // NativeArguments are passed in registers. |
72 ASSERT(sizeof(NativeArguments) == 4 * kWordSize); | 71 ASSERT(sizeof(NativeArguments) == 4 * kWordSize); |
73 __ ReserveAlignedFrameSpace(4 * kWordSize); // Reserve space for arguments. | 72 __ ReserveAlignedFrameSpace(4 * kWordSize); // Reserve space for arguments. |
74 | 73 |
75 // Pass NativeArguments structure by value and call runtime. | 74 // Pass NativeArguments structure by value and call runtime. |
76 // Registers A0, A1, A2, and A3 are used. | 75 // Registers A0, A1, A2, and A3 are used. |
77 | 76 |
78 ASSERT(thread_offset == 0 * kWordSize); | 77 ASSERT(thread_offset == 0 * kWordSize); |
79 // Set thread in NativeArgs. | 78 // Set thread in NativeArgs. |
80 __ mov(A0, THR); | 79 __ mov(A0, THR); |
81 | 80 |
82 // There are no runtime calls to closures, so we do not need to set the tag | 81 // There are no runtime calls to closures, so we do not need to set the tag |
83 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. | 82 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. |
84 ASSERT(argc_tag_offset == 1 * kWordSize); | 83 ASSERT(argc_tag_offset == 1 * kWordSize); |
85 __ mov(A1, S4); // Set argc in NativeArguments. | 84 __ mov(A1, S4); // Set argc in NativeArguments. |
86 | 85 |
87 ASSERT(argv_offset == 2 * kWordSize); | 86 ASSERT(argv_offset == 2 * kWordSize); |
88 __ sll(A2, S4, 2); | 87 __ sll(A2, S4, 2); |
89 __ addu(A2, FP, A2); // Compute argv. | 88 __ addu(A2, FP, A2); // Compute argv. |
90 // Set argv in NativeArguments. | 89 // Set argv in NativeArguments. |
91 __ addiu(A2, A2, Immediate(exitframe_last_param_slot_from_fp * kWordSize)); | 90 __ addiu(A2, A2, Immediate(kParamEndSlotFromFp * kWordSize)); |
92 | 91 |
93 | 92 |
94 // Call runtime or redirection via simulator. | 93 // Call runtime or redirection via simulator. |
95 // We defensively always jalr through T9 because it is sometimes required by | 94 // We defensively always jalr through T9 because it is sometimes required by |
96 // the MIPS ABI. | 95 // the MIPS ABI. |
97 __ mov(T9, S5); | 96 __ mov(T9, S5); |
98 __ jalr(T9); | 97 __ jalr(T9); |
99 | 98 |
100 ASSERT(retval_offset == 3 * kWordSize); | 99 ASSERT(retval_offset == 3 * kWordSize); |
101 // Retval is next to 1st argument. | 100 // Retval is next to 1st argument. |
102 __ delay_slot()->addiu(A3, A2, Immediate(kWordSize)); | 101 __ delay_slot()->addiu(A3, A2, Immediate(kWordSize)); |
103 __ Comment("CallToRuntimeStub return"); | 102 __ Comment("CallToRuntimeStub return"); |
104 | 103 |
105 // Mark that the isolate is executing Dart code. | 104 // Mark that the isolate is executing Dart code. |
106 __ LoadImmediate(A2, VMTag::kDartTagId); | 105 __ LoadImmediate(A2, VMTag::kDartTagId); |
107 __ sw(A2, Address(S6, Isolate::vm_tag_offset())); | 106 __ sw(A2, Address(S2, Isolate::vm_tag_offset())); |
108 | 107 |
109 // Reset exit frame information in Isolate structure. | 108 // Reset exit frame information in Isolate structure. |
110 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); | 109 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); |
111 | 110 |
112 __ LeaveStubFrameAndReturn(); | 111 __ LeaveStubFrameAndReturn(); |
113 } | 112 } |
114 | 113 |
115 | 114 |
116 // Print the stop message. | 115 // Print the stop message. |
117 DEFINE_LEAF_RUNTIME_ENTRY(void, PrintStopMessage, 1, const char* message) { | 116 DEFINE_LEAF_RUNTIME_ENTRY(void, PrintStopMessage, 1, const char* message) { |
(...skipping 23 matching lines...) Expand all Loading... |
141 void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) { | 140 void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) { |
142 const intptr_t thread_offset = NativeArguments::thread_offset(); | 141 const intptr_t thread_offset = NativeArguments::thread_offset(); |
143 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); | 142 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); |
144 const intptr_t argv_offset = NativeArguments::argv_offset(); | 143 const intptr_t argv_offset = NativeArguments::argv_offset(); |
145 const intptr_t retval_offset = NativeArguments::retval_offset(); | 144 const intptr_t retval_offset = NativeArguments::retval_offset(); |
146 | 145 |
147 __ SetPrologueOffset(); | 146 __ SetPrologueOffset(); |
148 __ Comment("CallNativeCFunctionStub"); | 147 __ Comment("CallNativeCFunctionStub"); |
149 __ EnterStubFrame(); | 148 __ EnterStubFrame(); |
150 | 149 |
151 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << S6)) != 0); | 150 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << S2)) != 0); |
152 __ LoadIsolate(S6); | 151 __ LoadIsolate(S2); |
153 | 152 |
154 // Save exit frame information to enable stack walking as we are about | 153 // Save exit frame information to enable stack walking as we are about |
155 // to transition to native code. | 154 // to transition to native code. |
156 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); | 155 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); |
157 | 156 |
158 #if defined(DEBUG) | 157 #if defined(DEBUG) |
159 { Label ok; | 158 { Label ok; |
160 // Check that we are always entering from Dart code. | 159 // Check that we are always entering from Dart code. |
161 __ lw(T0, Address(S6, Isolate::vm_tag_offset())); | 160 __ lw(T0, Address(S2, Isolate::vm_tag_offset())); |
162 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); | 161 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); |
163 __ Stop("Not coming from Dart code."); | 162 __ Stop("Not coming from Dart code."); |
164 __ Bind(&ok); | 163 __ Bind(&ok); |
165 } | 164 } |
166 #endif | 165 #endif |
167 | 166 |
168 // Mark that the isolate is executing Native code. | 167 // Mark that the isolate is executing Native code. |
169 __ sw(T5, Address(S6, Isolate::vm_tag_offset())); | 168 __ sw(T5, Address(S2, Isolate::vm_tag_offset())); |
170 | 169 |
171 // Initialize NativeArguments structure and call native function. | 170 // Initialize NativeArguments structure and call native function. |
172 // Registers A0, A1, A2, and A3 are used. | 171 // Registers A0, A1, A2, and A3 are used. |
173 | 172 |
174 ASSERT(thread_offset == 0 * kWordSize); | 173 ASSERT(thread_offset == 0 * kWordSize); |
175 // Set thread in NativeArgs. | 174 // Set thread in NativeArgs. |
176 __ mov(A0, THR); | 175 __ mov(A0, THR); |
177 | 176 |
178 // There are no native calls to closures, so we do not need to set the tag | 177 // There are no native calls to closures, so we do not need to set the tag |
179 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. | 178 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. |
180 ASSERT(argc_tag_offset == 1 * kWordSize); | 179 ASSERT(argc_tag_offset == 1 * kWordSize); |
181 // Set argc in NativeArguments: A1 already contains argc. | 180 // Set argc in NativeArguments: A1 already contains argc. |
182 | 181 |
183 ASSERT(argv_offset == 2 * kWordSize); | 182 ASSERT(argv_offset == 2 * kWordSize); |
184 // Set argv in NativeArguments: A2 already contains argv. | 183 // Set argv in NativeArguments: A2 already contains argv. |
185 | 184 |
186 ASSERT(retval_offset == 3 * kWordSize); | 185 ASSERT(retval_offset == 3 * kWordSize); |
187 __ addiu(A3, FP, Immediate(3 * kWordSize)); // Set retval in NativeArgs. | 186 // Set retval in NativeArgs. |
| 187 __ addiu(A3, FP, Immediate(kCallerSpSlotFromFp * kWordSize)); |
188 | 188 |
189 // Passing the structure by value as in runtime calls would require changing | 189 // Passing the structure by value as in runtime calls would require changing |
190 // Dart API for native functions. | 190 // Dart API for native functions. |
191 // For now, space is reserved on the stack and we pass a pointer to it. | 191 // For now, space is reserved on the stack and we pass a pointer to it. |
192 __ addiu(SP, SP, Immediate(-4 * kWordSize)); | 192 __ addiu(SP, SP, Immediate(-4 * kWordSize)); |
193 __ sw(A3, Address(SP, 3 * kWordSize)); | 193 __ sw(A3, Address(SP, 3 * kWordSize)); |
194 __ sw(A2, Address(SP, 2 * kWordSize)); | 194 __ sw(A2, Address(SP, 2 * kWordSize)); |
195 __ sw(A1, Address(SP, 1 * kWordSize)); | 195 __ sw(A1, Address(SP, 1 * kWordSize)); |
196 __ sw(A0, Address(SP, 0 * kWordSize)); | 196 __ sw(A0, Address(SP, 0 * kWordSize)); |
197 __ mov(A0, SP); // Pass the pointer to the NativeArguments. | 197 __ mov(A0, SP); // Pass the pointer to the NativeArguments. |
198 | 198 |
199 | 199 |
200 __ mov(A1, T5); // Pass the function entrypoint. | 200 __ mov(A1, T5); // Pass the function entrypoint. |
201 __ ReserveAlignedFrameSpace(2 * kWordSize); // Just passing A0, A1. | 201 __ ReserveAlignedFrameSpace(2 * kWordSize); // Just passing A0, A1. |
202 // Call native wrapper function or redirection via simulator. | 202 // Call native wrapper function or redirection via simulator. |
203 #if defined(USING_SIMULATOR) | 203 #if defined(USING_SIMULATOR) |
204 uword entry = reinterpret_cast<uword>(NativeEntry::NativeCallWrapper); | 204 uword entry = reinterpret_cast<uword>(NativeEntry::NativeCallWrapper); |
205 entry = Simulator::RedirectExternalReference( | 205 entry = Simulator::RedirectExternalReference( |
206 entry, Simulator::kNativeCall, NativeEntry::kNumCallWrapperArguments); | 206 entry, Simulator::kNativeCall, NativeEntry::kNumCallWrapperArguments); |
207 __ LoadImmediate(T9, entry); | 207 __ LoadImmediate(T9, entry); |
208 __ jalr(T9); | 208 __ jalr(T9); |
209 #else | 209 #else |
210 __ BranchLink(&NativeEntry::NativeCallWrapperLabel(), kNotPatchable); | 210 __ BranchLink(&NativeEntry::NativeCallWrapperLabel(), kNotPatchable); |
211 #endif | 211 #endif |
212 __ Comment("CallNativeCFunctionStub return"); | 212 __ Comment("CallNativeCFunctionStub return"); |
213 | 213 |
214 // Mark that the isolate is executing Dart code. | 214 // Mark that the isolate is executing Dart code. |
215 __ LoadImmediate(A2, VMTag::kDartTagId); | 215 __ LoadImmediate(A2, VMTag::kDartTagId); |
216 __ sw(A2, Address(S6, Isolate::vm_tag_offset())); | 216 __ sw(A2, Address(S2, Isolate::vm_tag_offset())); |
217 | 217 |
218 // Reset exit frame information in Isolate structure. | 218 // Reset exit frame information in Isolate structure. |
219 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); | 219 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); |
220 | 220 |
221 __ LeaveStubFrameAndReturn(); | 221 __ LeaveStubFrameAndReturn(); |
222 } | 222 } |
223 | 223 |
224 | 224 |
225 // Input parameters: | 225 // Input parameters: |
226 // RA : return address. | 226 // RA : return address. |
227 // SP : address of return value. | 227 // SP : address of return value. |
228 // T5 : address of the native function to call. | 228 // T5 : address of the native function to call. |
229 // A2 : address of first argument in argument array. | 229 // A2 : address of first argument in argument array. |
230 // A1 : argc_tag including number of arguments and function kind. | 230 // A1 : argc_tag including number of arguments and function kind. |
231 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) { | 231 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) { |
232 const intptr_t thread_offset = NativeArguments::thread_offset(); | 232 const intptr_t thread_offset = NativeArguments::thread_offset(); |
233 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); | 233 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); |
234 const intptr_t argv_offset = NativeArguments::argv_offset(); | 234 const intptr_t argv_offset = NativeArguments::argv_offset(); |
235 const intptr_t retval_offset = NativeArguments::retval_offset(); | 235 const intptr_t retval_offset = NativeArguments::retval_offset(); |
236 | 236 |
237 __ SetPrologueOffset(); | 237 __ SetPrologueOffset(); |
238 __ Comment("CallNativeCFunctionStub"); | 238 __ Comment("CallNativeCFunctionStub"); |
239 __ EnterStubFrame(); | 239 __ EnterStubFrame(); |
240 | 240 |
241 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << S6)) != 0); | 241 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << S2)) != 0); |
242 __ LoadIsolate(S6); | 242 __ LoadIsolate(S2); |
243 | 243 |
244 // Save exit frame information to enable stack walking as we are about | 244 // Save exit frame information to enable stack walking as we are about |
245 // to transition to native code. | 245 // to transition to native code. |
246 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); | 246 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); |
247 | 247 |
248 #if defined(DEBUG) | 248 #if defined(DEBUG) |
249 { Label ok; | 249 { Label ok; |
250 // Check that we are always entering from Dart code. | 250 // Check that we are always entering from Dart code. |
251 __ lw(T0, Address(S6, Isolate::vm_tag_offset())); | 251 __ lw(T0, Address(S2, Isolate::vm_tag_offset())); |
252 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); | 252 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); |
253 __ Stop("Not coming from Dart code."); | 253 __ Stop("Not coming from Dart code."); |
254 __ Bind(&ok); | 254 __ Bind(&ok); |
255 } | 255 } |
256 #endif | 256 #endif |
257 | 257 |
258 // Mark that the isolate is executing Native code. | 258 // Mark that the isolate is executing Native code. |
259 __ sw(T5, Address(S6, Isolate::vm_tag_offset())); | 259 __ sw(T5, Address(S2, Isolate::vm_tag_offset())); |
260 | 260 |
261 // Initialize NativeArguments structure and call native function. | 261 // Initialize NativeArguments structure and call native function. |
262 // Registers A0, A1, A2, and A3 are used. | 262 // Registers A0, A1, A2, and A3 are used. |
263 | 263 |
264 ASSERT(thread_offset == 0 * kWordSize); | 264 ASSERT(thread_offset == 0 * kWordSize); |
265 // Set thread in NativeArgs. | 265 // Set thread in NativeArgs. |
266 __ mov(A0, THR); | 266 __ mov(A0, THR); |
267 | 267 |
268 // There are no native calls to closures, so we do not need to set the tag | 268 // There are no native calls to closures, so we do not need to set the tag |
269 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. | 269 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. |
270 ASSERT(argc_tag_offset == 1 * kWordSize); | 270 ASSERT(argc_tag_offset == 1 * kWordSize); |
271 // Set argc in NativeArguments: A1 already contains argc. | 271 // Set argc in NativeArguments: A1 already contains argc. |
272 | 272 |
273 ASSERT(argv_offset == 2 * kWordSize); | 273 ASSERT(argv_offset == 2 * kWordSize); |
274 // Set argv in NativeArguments: A2 already contains argv. | 274 // Set argv in NativeArguments: A2 already contains argv. |
275 | 275 |
276 ASSERT(retval_offset == 3 * kWordSize); | 276 ASSERT(retval_offset == 3 * kWordSize); |
277 __ addiu(A3, FP, Immediate(3 * kWordSize)); // Set retval in NativeArgs. | 277 // Set retval in NativeArgs. |
| 278 __ addiu(A3, FP, Immediate(kCallerSpSlotFromFp * kWordSize)); |
278 | 279 |
279 // Passing the structure by value as in runtime calls would require changing | 280 // Passing the structure by value as in runtime calls would require changing |
280 // Dart API for native functions. | 281 // Dart API for native functions. |
281 // For now, space is reserved on the stack and we pass a pointer to it. | 282 // For now, space is reserved on the stack and we pass a pointer to it. |
282 __ addiu(SP, SP, Immediate(-4 * kWordSize)); | 283 __ addiu(SP, SP, Immediate(-4 * kWordSize)); |
283 __ sw(A3, Address(SP, 3 * kWordSize)); | 284 __ sw(A3, Address(SP, 3 * kWordSize)); |
284 __ sw(A2, Address(SP, 2 * kWordSize)); | 285 __ sw(A2, Address(SP, 2 * kWordSize)); |
285 __ sw(A1, Address(SP, 1 * kWordSize)); | 286 __ sw(A1, Address(SP, 1 * kWordSize)); |
286 __ sw(A0, Address(SP, 0 * kWordSize)); | 287 __ sw(A0, Address(SP, 0 * kWordSize)); |
287 __ mov(A0, SP); // Pass the pointer to the NativeArguments. | 288 __ mov(A0, SP); // Pass the pointer to the NativeArguments. |
288 | 289 |
289 __ ReserveAlignedFrameSpace(kWordSize); // Just passing A0. | 290 __ ReserveAlignedFrameSpace(kWordSize); // Just passing A0. |
290 | 291 |
291 // Call native function or redirection via simulator. | 292 // Call native function or redirection via simulator. |
292 | 293 |
293 // We defensively always jalr through T9 because it is sometimes required by | 294 // We defensively always jalr through T9 because it is sometimes required by |
294 // the MIPS ABI. | 295 // the MIPS ABI. |
295 __ mov(T9, T5); | 296 __ mov(T9, T5); |
296 __ jalr(T9); | 297 __ jalr(T9); |
297 __ Comment("CallNativeCFunctionStub return"); | 298 __ Comment("CallNativeCFunctionStub return"); |
298 | 299 |
299 // Mark that the isolate is executing Dart code. | 300 // Mark that the isolate is executing Dart code. |
300 __ LoadImmediate(A2, VMTag::kDartTagId); | 301 __ LoadImmediate(A2, VMTag::kDartTagId); |
301 __ sw(A2, Address(S6, Isolate::vm_tag_offset())); | 302 __ sw(A2, Address(S2, Isolate::vm_tag_offset())); |
302 | 303 |
303 // Reset exit frame information in Isolate structure. | 304 // Reset exit frame information in Isolate structure. |
304 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); | 305 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); |
305 | 306 |
306 __ LeaveStubFrameAndReturn(); | 307 __ LeaveStubFrameAndReturn(); |
307 } | 308 } |
308 | 309 |
309 | 310 |
310 // Input parameters: | 311 // Input parameters: |
311 // S4: arguments descriptor array. | 312 // S4: arguments descriptor array. |
312 void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) { | 313 void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) { |
313 __ Comment("CallStaticFunctionStub"); | 314 __ Comment("CallStaticFunctionStub"); |
314 __ EnterStubFrame(); | 315 __ EnterStubFrame(); |
315 // Setup space on stack for return value and preserve arguments descriptor. | 316 // Setup space on stack for return value and preserve arguments descriptor. |
316 | 317 |
317 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | 318 __ addiu(SP, SP, Immediate(-2 * kWordSize)); |
318 __ sw(S4, Address(SP, 1 * kWordSize)); | 319 __ sw(S4, Address(SP, 1 * kWordSize)); |
319 __ LoadObject(TMP, Object::null_object()); | 320 __ LoadObject(TMP, Object::null_object()); |
320 __ sw(TMP, Address(SP, 0 * kWordSize)); | 321 __ sw(TMP, Address(SP, 0 * kWordSize)); |
321 | 322 |
322 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0); | 323 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0); |
323 __ Comment("CallStaticFunctionStub return"); | 324 __ Comment("CallStaticFunctionStub return"); |
324 | 325 |
325 // Get Code object result and restore arguments descriptor array. | 326 // Get Code object result and restore arguments descriptor array. |
326 __ lw(T0, Address(SP, 0 * kWordSize)); | 327 __ lw(CODE_REG, Address(SP, 0 * kWordSize)); |
327 __ lw(S4, Address(SP, 1 * kWordSize)); | 328 __ lw(S4, Address(SP, 1 * kWordSize)); |
328 __ addiu(SP, SP, Immediate(2 * kWordSize)); | 329 __ addiu(SP, SP, Immediate(2 * kWordSize)); |
329 | 330 |
330 __ lw(T0, FieldAddress(T0, Code::entry_point_offset())); | 331 __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); |
331 | 332 |
332 // Remove the stub frame as we are about to jump to the dart function. | 333 // Remove the stub frame as we are about to jump to the dart function. |
333 __ LeaveStubFrameAndReturn(T0); | 334 __ LeaveStubFrameAndReturn(T0); |
334 } | 335 } |
335 | 336 |
336 | 337 |
337 // Called from a static call only when an invalid code has been entered | 338 // Called from a static call only when an invalid code has been entered |
338 // (invalid because its function was optimized or deoptimized). | 339 // (invalid because its function was optimized or deoptimized). |
339 // S4: arguments descriptor array. | 340 // S4: arguments descriptor array. |
340 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) { | 341 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) { |
| 342 // Load code pointer to this stub from the thread: |
| 343 // The one that is passed in, is not correct - it points to the code object |
| 344 // that needs to be replaced. |
| 345 __ lw(CODE_REG, Address(THR, Thread::fix_callers_target_code_offset())); |
341 // Create a stub frame as we are pushing some objects on the stack before | 346 // Create a stub frame as we are pushing some objects on the stack before |
342 // calling into the runtime. | 347 // calling into the runtime. |
343 __ Comment("FixCallersTarget"); | |
344 __ EnterStubFrame(); | 348 __ EnterStubFrame(); |
345 // Setup space on stack for return value and preserve arguments descriptor. | 349 // Setup space on stack for return value and preserve arguments descriptor. |
346 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | 350 __ addiu(SP, SP, Immediate(-2 * kWordSize)); |
347 __ sw(S4, Address(SP, 1 * kWordSize)); | 351 __ sw(S4, Address(SP, 1 * kWordSize)); |
348 __ LoadObject(TMP, Object::null_object()); | 352 __ LoadObject(TMP, Object::null_object()); |
349 __ sw(TMP, Address(SP, 0 * kWordSize)); | 353 __ sw(TMP, Address(SP, 0 * kWordSize)); |
350 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0); | 354 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0); |
351 // Get Code object result and restore arguments descriptor array. | 355 // Get Code object result and restore arguments descriptor array. |
352 __ lw(T0, Address(SP, 0 * kWordSize)); | 356 __ lw(CODE_REG, Address(SP, 0 * kWordSize)); |
353 __ lw(S4, Address(SP, 1 * kWordSize)); | 357 __ lw(S4, Address(SP, 1 * kWordSize)); |
354 __ addiu(SP, SP, Immediate(2 * kWordSize)); | 358 __ addiu(SP, SP, Immediate(2 * kWordSize)); |
355 | 359 |
356 // Jump to the dart function. | 360 // Jump to the dart function. |
357 __ lw(T0, FieldAddress(T0, Code::entry_point_offset())); | 361 __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); |
358 | 362 |
359 // Remove the stub frame. | 363 // Remove the stub frame. |
360 __ LeaveStubFrameAndReturn(T0); | 364 __ LeaveStubFrameAndReturn(T0); |
361 } | 365 } |
362 | 366 |
363 | 367 |
364 // Called from object allocate instruction when the allocation stub has been | 368 // Called from object allocate instruction when the allocation stub has been |
365 // disabled. | 369 // disabled. |
366 void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) { | 370 void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) { |
367 __ Comment("FixAllocationStubTarget"); | 371 // Load code pointer to this stub from the thread: |
| 372 // The one that is passed in, is not correct - it points to the code object |
| 373 // that needs to be replaced. |
| 374 __ lw(CODE_REG, Address(THR, Thread::fix_allocation_stub_code_offset())); |
368 __ EnterStubFrame(); | 375 __ EnterStubFrame(); |
369 // Setup space on stack for return value. | 376 // Setup space on stack for return value. |
370 __ addiu(SP, SP, Immediate(-1 * kWordSize)); | 377 __ addiu(SP, SP, Immediate(-1 * kWordSize)); |
371 __ LoadObject(TMP, Object::null_object()); | 378 __ LoadObject(TMP, Object::null_object()); |
372 __ sw(TMP, Address(SP, 0 * kWordSize)); | 379 __ sw(TMP, Address(SP, 0 * kWordSize)); |
373 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0); | 380 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0); |
374 // Get Code object result. | 381 // Get Code object result. |
375 __ lw(T0, Address(SP, 0 * kWordSize)); | 382 __ lw(CODE_REG, Address(SP, 0 * kWordSize)); |
376 __ addiu(SP, SP, Immediate(1 * kWordSize)); | 383 __ addiu(SP, SP, Immediate(1 * kWordSize)); |
377 | 384 |
378 // Jump to the dart function. | 385 // Jump to the dart function. |
379 __ lw(T0, FieldAddress(T0, Code::entry_point_offset())); | 386 __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); |
380 | 387 |
381 // Remove the stub frame. | 388 // Remove the stub frame. |
382 __ LeaveStubFrameAndReturn(T0); | 389 __ LeaveStubFrameAndReturn(T0); |
383 } | 390 } |
384 | 391 |
385 | 392 |
386 // Input parameters: | 393 // Input parameters: |
387 // A1: Smi-tagged argument count, may be zero. | 394 // A1: Smi-tagged argument count, may be zero. |
388 // FP[kParamEndSlotFromFp + 1]: Last argument. | 395 // FP[kParamEndSlotFromFp + 1]: Last argument. |
389 static void PushArgumentsArray(Assembler* assembler) { | 396 static void PushArgumentsArray(Assembler* assembler) { |
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
426 // - Push all registers that can contain values. | 433 // - Push all registers that can contain values. |
427 // - Call C routine to copy the stack and saved registers into temporary buffer. | 434 // - Call C routine to copy the stack and saved registers into temporary buffer. |
428 // - Adjust caller's frame to correct unoptimized frame size. | 435 // - Adjust caller's frame to correct unoptimized frame size. |
429 // - Fill the unoptimized frame. | 436 // - Fill the unoptimized frame. |
430 // - Materialize objects that require allocation (e.g. Double instances). | 437 // - Materialize objects that require allocation (e.g. Double instances). |
431 // GC can occur only after frame is fully rewritten. | 438 // GC can occur only after frame is fully rewritten. |
432 // Stack after EnterFrame(...) below: | 439 // Stack after EnterFrame(...) below: |
433 // +------------------+ | 440 // +------------------+ |
434 // | Saved PP | <- TOS | 441 // | Saved PP | <- TOS |
435 // +------------------+ | 442 // +------------------+ |
| 443 // | Saved CODE_REG | |
| 444 // +------------------+ |
436 // | Saved FP | <- FP of stub | 445 // | Saved FP | <- FP of stub |
437 // +------------------+ | 446 // +------------------+ |
438 // | Saved LR | (deoptimization point) | 447 // | Saved LR | (deoptimization point) |
439 // +------------------+ | 448 // +------------------+ |
440 // | PC marker | | 449 // | Saved CODE_REG | |
441 // +------------------+ | 450 // +------------------+ |
442 // | ... | <- SP of optimized frame | 451 // | ... | <- SP of optimized frame |
443 // | 452 // |
444 // Parts of the code cannot GC, part of the code can GC. | 453 // Parts of the code cannot GC, part of the code can GC. |
445 static void GenerateDeoptimizationSequence(Assembler* assembler, | 454 static void GenerateDeoptimizationSequence(Assembler* assembler, |
446 bool preserve_result) { | 455 DeoptStubKind kind) { |
447 const intptr_t kPushedRegistersSize = | 456 const intptr_t kPushedRegistersSize = |
448 kNumberOfCpuRegisters * kWordSize + | 457 kNumberOfCpuRegisters * kWordSize + kNumberOfFRegisters * kWordSize; |
449 4 * kWordSize + // PP, FP, RA, PC marker. | |
450 kNumberOfFRegisters * kWordSize; | |
451 | 458 |
452 __ SetPrologueOffset(); | 459 __ SetPrologueOffset(); |
453 __ Comment("GenerateDeoptimizationSequence"); | 460 __ Comment("GenerateDeoptimizationSequence"); |
454 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there | 461 // DeoptimizeCopyFrame expects a Dart frame. |
455 // is no need to set the correct PC marker or load PP, since they get patched. | 462 __ EnterStubFrame(kPushedRegistersSize); |
456 __ addiu(SP, SP, Immediate(-kPushedRegistersSize * kWordSize)); | |
457 __ sw(ZR, Address(SP, kPushedRegistersSize - 1 * kWordSize)); | |
458 __ sw(RA, Address(SP, kPushedRegistersSize - 2 * kWordSize)); | |
459 __ sw(FP, Address(SP, kPushedRegistersSize - 3 * kWordSize)); | |
460 __ sw(PP, Address(SP, kPushedRegistersSize - 4 * kWordSize)); | |
461 __ addiu(FP, SP, Immediate(kPushedRegistersSize - 3 * kWordSize)); | |
462 | |
463 __ LoadPoolPointer(); | |
464 | 463 |
465 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry | 464 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry |
466 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls. | 465 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls. |
467 const intptr_t saved_result_slot_from_fp = | 466 const intptr_t saved_result_slot_from_fp = |
468 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0); | 467 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0); |
469 // Result in V0 is preserved as part of pushing all registers below. | 468 // Result in V0 is preserved as part of pushing all registers below. |
470 | 469 |
471 // Push registers in their enumeration order: lowest register number at | 470 // Push registers in their enumeration order: lowest register number at |
472 // lowest address. | 471 // lowest address. |
473 for (int i = 0; i < kNumberOfCpuRegisters; i++) { | 472 for (int i = 0; i < kNumberOfCpuRegisters; i++) { |
474 const int slot = 4 + kNumberOfCpuRegisters - i; | 473 const int slot = kNumberOfCpuRegisters - i; |
475 Register reg = static_cast<Register>(i); | 474 Register reg = static_cast<Register>(i); |
476 __ sw(reg, Address(SP, kPushedRegistersSize - slot * kWordSize)); | 475 if (reg == CODE_REG) { |
| 476 COMPILE_ASSERT(TMP < CODE_REG); // Assert TMP is pushed first. |
| 477 __ lw(TMP, Address(FP, kCallerSpSlotFromFp * kWordSize)); |
| 478 __ sw(TMP, Address(SP, kPushedRegistersSize - slot * kWordSize)); |
| 479 } else { |
| 480 __ sw(reg, Address(SP, kPushedRegistersSize - slot * kWordSize)); |
| 481 } |
477 } | 482 } |
478 for (int i = 0; i < kNumberOfFRegisters; i++) { | 483 for (int i = 0; i < kNumberOfFRegisters; i++) { |
479 // These go below the CPU registers. | 484 // These go below the CPU registers. |
480 const int slot = 4 + kNumberOfCpuRegisters + kNumberOfFRegisters - i; | 485 const int slot = kNumberOfCpuRegisters + kNumberOfFRegisters - i; |
481 FRegister reg = static_cast<FRegister>(i); | 486 FRegister reg = static_cast<FRegister>(i); |
482 __ swc1(reg, Address(SP, kPushedRegistersSize - slot * kWordSize)); | 487 __ swc1(reg, Address(SP, kPushedRegistersSize - slot * kWordSize)); |
483 } | 488 } |
484 | 489 |
485 __ mov(A0, SP); // Pass address of saved registers block. | 490 __ mov(A0, SP); // Pass address of saved registers block. |
| 491 __ LoadImmediate(A1, (kind == kLazyDeopt) ? 1 : 0); |
486 __ ReserveAlignedFrameSpace(1 * kWordSize); | 492 __ ReserveAlignedFrameSpace(1 * kWordSize); |
487 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 1); | 493 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2); |
488 // Result (V0) is stack-size (FP - SP) in bytes, incl. the return address. | 494 // Result (V0) is stack-size (FP - SP) in bytes, incl. the return address. |
489 | 495 |
| 496 const bool preserve_result = (kind == kLazyDeopt); |
490 if (preserve_result) { | 497 if (preserve_result) { |
491 // Restore result into T1 temporarily. | 498 // Restore result into T1 temporarily. |
492 __ lw(T1, Address(FP, saved_result_slot_from_fp * kWordSize)); | 499 __ lw(T1, Address(FP, saved_result_slot_from_fp * kWordSize)); |
493 } | 500 } |
494 | 501 |
| 502 __ RestoreCodePointer(); |
495 __ LeaveDartFrame(); | 503 __ LeaveDartFrame(); |
496 __ subu(SP, FP, V0); | 504 __ subu(SP, FP, V0); |
497 | 505 |
498 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there | 506 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there |
499 // is no need to set the correct PC marker or load PP, since they get patched. | 507 // is no need to set the correct PC marker or load PP, since they get patched. |
500 __ EnterStubFrame(); | 508 __ EnterStubFrame(); |
501 | 509 |
502 __ mov(A0, FP); // Get last FP address. | 510 __ mov(A0, FP); // Get last FP address. |
503 if (preserve_result) { | 511 if (preserve_result) { |
504 __ Push(T1); // Preserve result as first local. | 512 __ Push(T1); // Preserve result as first local. |
505 } | 513 } |
506 __ ReserveAlignedFrameSpace(1 * kWordSize); | 514 __ ReserveAlignedFrameSpace(1 * kWordSize); |
507 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in A0. | 515 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in A0. |
508 if (preserve_result) { | 516 if (preserve_result) { |
509 // Restore result into T1. | 517 // Restore result into T1. |
510 __ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize)); | 518 __ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize)); |
511 } | 519 } |
512 // Code above cannot cause GC. | 520 // Code above cannot cause GC. |
| 521 __ RestoreCodePointer(); |
513 __ LeaveStubFrame(); | 522 __ LeaveStubFrame(); |
514 | 523 |
515 // Frame is fully rewritten at this point and it is safe to perform a GC. | 524 // Frame is fully rewritten at this point and it is safe to perform a GC. |
516 // Materialize any objects that were deferred by FillFrame because they | 525 // Materialize any objects that were deferred by FillFrame because they |
517 // require allocation. | 526 // require allocation. |
518 // Enter stub frame with loading PP. The caller's PP is not materialized yet. | 527 // Enter stub frame with loading PP. The caller's PP is not materialized yet. |
519 __ EnterStubFrame(); | 528 __ EnterStubFrame(); |
520 if (preserve_result) { | 529 if (preserve_result) { |
521 __ Push(T1); // Preserve result, it will be GC-d here. | 530 __ Push(T1); // Preserve result, it will be GC-d here. |
522 } | 531 } |
523 __ PushObject(Smi::ZoneHandle()); // Space for the result. | 532 __ PushObject(Smi::ZoneHandle()); // Space for the result. |
524 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0); | 533 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0); |
525 // Result tells stub how many bytes to remove from the expression stack | 534 // Result tells stub how many bytes to remove from the expression stack |
526 // of the bottom-most frame. They were used as materialization arguments. | 535 // of the bottom-most frame. They were used as materialization arguments. |
527 __ Pop(T1); | 536 __ Pop(T1); |
528 if (preserve_result) { | 537 if (preserve_result) { |
529 __ Pop(V0); // Restore result. | 538 __ Pop(V0); // Restore result. |
530 } | 539 } |
531 __ LeaveStubFrame(); | 540 __ LeaveStubFrame(); |
532 // Remove materialization arguments. | 541 // Remove materialization arguments. |
533 __ SmiUntag(T1); | 542 __ SmiUntag(T1); |
534 __ addu(SP, SP, T1); | 543 __ addu(SP, SP, T1); |
535 __ Ret(); | 544 __ Ret(); |
536 } | 545 } |
537 | 546 |
538 | 547 |
539 void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) { | 548 void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) { |
540 // Correct return address to point just after the call that is being | 549 // Correct return address to point just after the call that is being |
541 // deoptimized. | 550 // deoptimized. |
542 __ AddImmediate(RA, -CallPattern::kFixedLengthInBytes); | 551 __ AddImmediate(RA, -CallPattern::kDeoptCallLengthInBytes); |
543 GenerateDeoptimizationSequence(assembler, true); // Preserve V0. | 552 // Push zap value instead of CODE_REG for lazy deopt. |
| 553 __ LoadImmediate(TMP, 0xf1f1f1f1); |
| 554 __ Push(TMP); |
| 555 GenerateDeoptimizationSequence(assembler, kLazyDeopt); |
544 } | 556 } |
545 | 557 |
546 | 558 |
547 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { | 559 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { |
548 GenerateDeoptimizationSequence(assembler, false); // Don't preserve V0. | 560 GenerateDeoptimizationSequence(assembler, kEagerDeopt); |
549 } | 561 } |
550 | 562 |
551 | 563 |
552 static void GenerateDispatcherCode(Assembler* assembler, | 564 static void GenerateDispatcherCode(Assembler* assembler, |
553 Label* call_target_function) { | 565 Label* call_target_function) { |
554 __ Comment("NoSuchMethodDispatch"); | 566 __ Comment("NoSuchMethodDispatch"); |
555 // When lazily generated invocation dispatchers are disabled, the | 567 // When lazily generated invocation dispatchers are disabled, the |
556 // miss-handler may return null. | 568 // miss-handler may return null. |
557 __ BranchNotEqual(T0, Object::null_object(), call_target_function); | 569 __ BranchNotEqual(T0, Object::null_object(), call_target_function); |
558 __ EnterStubFrame(); | 570 __ EnterStubFrame(); |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
608 __ sw(S5, Address(SP, 1 * kWordSize)); | 620 __ sw(S5, Address(SP, 1 * kWordSize)); |
609 __ sw(S4, Address(SP, 0 * kWordSize)); | 621 __ sw(S4, Address(SP, 0 * kWordSize)); |
610 | 622 |
611 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3); | 623 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3); |
612 | 624 |
613 __ lw(T0, Address(SP, 3 * kWordSize)); // Get result function. | 625 __ lw(T0, Address(SP, 3 * kWordSize)); // Get result function. |
614 __ lw(S4, Address(SP, 4 * kWordSize)); // Restore argument descriptor. | 626 __ lw(S4, Address(SP, 4 * kWordSize)); // Restore argument descriptor. |
615 __ lw(S5, Address(SP, 5 * kWordSize)); // Restore IC data. | 627 __ lw(S5, Address(SP, 5 * kWordSize)); // Restore IC data. |
616 __ addiu(SP, SP, Immediate(6 * kWordSize)); | 628 __ addiu(SP, SP, Immediate(6 * kWordSize)); |
617 | 629 |
| 630 __ RestoreCodePointer(); |
618 __ LeaveStubFrame(); | 631 __ LeaveStubFrame(); |
619 | 632 |
620 if (!FLAG_lazy_dispatchers) { | 633 if (!FLAG_lazy_dispatchers) { |
621 Label call_target_function; | 634 Label call_target_function; |
622 GenerateDispatcherCode(assembler, &call_target_function); | 635 GenerateDispatcherCode(assembler, &call_target_function); |
623 __ Bind(&call_target_function); | 636 __ Bind(&call_target_function); |
624 } | 637 } |
625 | 638 |
| 639 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); |
626 __ lw(T2, FieldAddress(T0, Function::entry_point_offset())); | 640 __ lw(T2, FieldAddress(T0, Function::entry_point_offset())); |
627 __ jr(T2); | 641 __ jr(T2); |
628 } | 642 } |
629 | 643 |
630 | 644 |
631 // Called for inline allocation of arrays. | 645 // Called for inline allocation of arrays. |
632 // Input parameters: | 646 // Input parameters: |
633 // RA: return address. | 647 // RA: return address. |
634 // A1: Array length as Smi (must be preserved). | 648 // A1: Array length as Smi (must be preserved). |
635 // A0: array element type (either NULL or an instantiated type). | 649 // A0: array element type (either NULL or an instantiated type). |
(...skipping 137 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
773 __ lw(A0, Address(SP, 0 * kWordSize)); | 787 __ lw(A0, Address(SP, 0 * kWordSize)); |
774 __ addiu(SP, SP, Immediate(3 * kWordSize)); | 788 __ addiu(SP, SP, Immediate(3 * kWordSize)); |
775 | 789 |
776 __ LeaveStubFrameAndReturn(); | 790 __ LeaveStubFrameAndReturn(); |
777 } | 791 } |
778 | 792 |
779 | 793 |
780 // Called when invoking Dart code from C++ (VM code). | 794 // Called when invoking Dart code from C++ (VM code). |
781 // Input parameters: | 795 // Input parameters: |
782 // RA : points to return address. | 796 // RA : points to return address. |
783 // A0 : entrypoint of the Dart function to call. | 797 // A0 : code object of the Dart function to call. |
784 // A1 : arguments descriptor array. | 798 // A1 : arguments descriptor array. |
785 // A2 : arguments array. | 799 // A2 : arguments array. |
786 // A3 : current thread. | 800 // A3 : current thread. |
787 void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) { | 801 void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) { |
788 // Save frame pointer coming in. | 802 // Save frame pointer coming in. |
789 __ Comment("InvokeDartCodeStub"); | 803 __ Comment("InvokeDartCodeStub"); |
790 __ EnterFrame(); | 804 __ EnterFrame(); |
791 | 805 |
792 // Save new context and C++ ABI callee-saved registers. | 806 // Save new context and C++ ABI callee-saved registers. |
793 | 807 |
(...skipping 12 matching lines...) Expand all Loading... |
806 | 820 |
807 for (intptr_t i = kAbiFirstPreservedFpuReg; | 821 for (intptr_t i = kAbiFirstPreservedFpuReg; |
808 i <= kAbiLastPreservedFpuReg; i++) { | 822 i <= kAbiLastPreservedFpuReg; i++) { |
809 FRegister r = static_cast<FRegister>(i); | 823 FRegister r = static_cast<FRegister>(i); |
810 const intptr_t slot = | 824 const intptr_t slot = |
811 kAbiPreservedCpuRegCount + kPreservedSlots + i - | 825 kAbiPreservedCpuRegCount + kPreservedSlots + i - |
812 kAbiFirstPreservedFpuReg; | 826 kAbiFirstPreservedFpuReg; |
813 __ swc1(r, Address(SP, slot * kWordSize)); | 827 __ swc1(r, Address(SP, slot * kWordSize)); |
814 } | 828 } |
815 | 829 |
816 // We now load the pool pointer(PP) as we are about to invoke dart code and we | 830 // We now load the pool pointer(PP) with a GC safe value as we are about |
817 // could potentially invoke some intrinsic functions which need the PP to be | 831 // to invoke dart code. |
818 // set up. | 832 __ LoadImmediate(PP, 0); |
819 __ LoadPoolPointer(); | |
820 | 833 |
821 // Set up THR, which caches the current thread in Dart code. | 834 // Set up THR, which caches the current thread in Dart code. |
822 if (THR != A3) { | 835 if (THR != A3) { |
823 __ mov(THR, A3); | 836 __ mov(THR, A3); |
824 } | 837 } |
825 __ LoadIsolate(T2); | 838 __ LoadIsolate(T2); |
826 | 839 |
827 // Save the current VMTag on the stack. | 840 // Save the current VMTag on the stack. |
828 __ lw(T1, Address(T2, Isolate::vm_tag_offset())); | 841 __ lw(T1, Address(T2, Isolate::vm_tag_offset())); |
829 __ sw(T1, Address(SP, 2 * kWordSize)); | 842 __ sw(T1, Address(SP, 2 * kWordSize)); |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
868 __ Push(A3); | 881 __ Push(A3); |
869 __ addiu(A1, A1, Immediate(1)); | 882 __ addiu(A1, A1, Immediate(1)); |
870 __ BranchSignedLess(A1, T1, &push_arguments); | 883 __ BranchSignedLess(A1, T1, &push_arguments); |
871 __ delay_slot()->addiu(A2, A2, Immediate(kWordSize)); | 884 __ delay_slot()->addiu(A2, A2, Immediate(kWordSize)); |
872 | 885 |
873 __ Bind(&done_push_arguments); | 886 __ Bind(&done_push_arguments); |
874 | 887 |
875 // Call the Dart code entrypoint. | 888 // Call the Dart code entrypoint. |
876 // We are calling into Dart code, here, so there is no need to call through | 889 // We are calling into Dart code, here, so there is no need to call through |
877 // T9 to match the ABI. | 890 // T9 to match the ABI. |
| 891 __ lw(CODE_REG, Address(A0, VMHandles::kOffsetOfRawPtrInHandle)); |
| 892 __ lw(A0, FieldAddress(CODE_REG, Code::entry_point_offset())); |
878 __ jalr(A0); // S4 is the arguments descriptor array. | 893 __ jalr(A0); // S4 is the arguments descriptor array. |
879 __ Comment("InvokeDartCodeStub return"); | 894 __ Comment("InvokeDartCodeStub return"); |
880 | 895 |
881 // Get rid of arguments pushed on the stack. | 896 // Get rid of arguments pushed on the stack. |
882 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize); | 897 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize); |
883 | 898 |
884 __ LoadIsolate(S6); | 899 __ LoadIsolate(S2); |
885 | 900 |
886 // Restore the current VMTag from the stack. | 901 // Restore the current VMTag from the stack. |
887 __ lw(T1, Address(SP, 2 * kWordSize)); | 902 __ lw(T1, Address(SP, 2 * kWordSize)); |
888 __ sw(T1, Address(S6, Isolate::vm_tag_offset())); | 903 __ sw(T1, Address(S2, Isolate::vm_tag_offset())); |
889 | 904 |
890 // Restore the saved top resource and top exit frame info back into the | 905 // Restore the saved top resource and top exit frame info back into the |
891 // Isolate structure. Uses T0 as a temporary register for this. | 906 // Isolate structure. Uses T0 as a temporary register for this. |
892 __ lw(T0, Address(SP, 1 * kWordSize)); | 907 __ lw(T0, Address(SP, 1 * kWordSize)); |
893 __ sw(T0, Address(THR, Thread::top_resource_offset())); | 908 __ sw(T0, Address(THR, Thread::top_resource_offset())); |
894 __ lw(T0, Address(SP, 0 * kWordSize)); | 909 __ lw(T0, Address(SP, 0 * kWordSize)); |
895 __ sw(T0, Address(THR, Thread::top_exit_frame_info_offset())); | 910 __ sw(T0, Address(THR, Thread::top_exit_frame_info_offset())); |
896 | 911 |
897 // Restore C++ ABI callee-saved registers. | 912 // Restore C++ ABI callee-saved registers. |
898 for (int i = S0; i <= S7; i++) { | 913 for (int i = S0; i <= S7; i++) { |
(...skipping 202 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1101 // Restore callee-saved registers, tear down frame. | 1116 // Restore callee-saved registers, tear down frame. |
1102 __ LeaveCallRuntimeFrame(); | 1117 __ LeaveCallRuntimeFrame(); |
1103 __ Ret(); | 1118 __ Ret(); |
1104 } | 1119 } |
1105 | 1120 |
1106 | 1121 |
1107 // Called for inline allocation of objects. | 1122 // Called for inline allocation of objects. |
1108 // Input parameters: | 1123 // Input parameters: |
1109 // RA : return address. | 1124 // RA : return address. |
1110 // SP + 0 : type arguments object (only if class is parameterized). | 1125 // SP + 0 : type arguments object (only if class is parameterized). |
1111 // Returns patch_code_pc offset where patching code for disabling the stub | 1126 void StubCode::GenerateAllocationStubForClass(Assembler* assembler, |
1112 // has been generated (similar to regularly generated Dart code). | 1127 const Class& cls) { |
1113 void StubCode::GenerateAllocationStubForClass( | |
1114 Assembler* assembler, const Class& cls, | |
1115 uword* entry_patch_offset, uword* patch_code_pc_offset) { | |
1116 __ Comment("AllocationStubForClass"); | 1128 __ Comment("AllocationStubForClass"); |
1117 *entry_patch_offset = assembler->CodeSize(); | |
1118 // The generated code is different if the class is parameterized. | 1129 // The generated code is different if the class is parameterized. |
1119 const bool is_cls_parameterized = cls.NumTypeArguments() > 0; | 1130 const bool is_cls_parameterized = cls.NumTypeArguments() > 0; |
1120 ASSERT(!is_cls_parameterized || | 1131 ASSERT(!is_cls_parameterized || |
1121 (cls.type_arguments_field_offset() != Class::kNoTypeArguments)); | 1132 (cls.type_arguments_field_offset() != Class::kNoTypeArguments)); |
1122 // kInlineInstanceSize is a constant used as a threshold for determining | 1133 // kInlineInstanceSize is a constant used as a threshold for determining |
1123 // when the object initialization should be done as a loop or as | 1134 // when the object initialization should be done as a loop or as |
1124 // straight line code. | 1135 // straight line code. |
1125 const int kInlineInstanceSize = 12; | 1136 const int kInlineInstanceSize = 12; |
1126 const intptr_t instance_size = cls.instance_size(); | 1137 const intptr_t instance_size = cls.instance_size(); |
1127 ASSERT(instance_size > 0); | 1138 ASSERT(instance_size > 0); |
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1230 __ sw(T7, Address(SP, 0 * kWordSize)); | 1241 __ sw(T7, Address(SP, 0 * kWordSize)); |
1231 } | 1242 } |
1232 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object. | 1243 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object. |
1233 __ Comment("AllocationStubForClass return"); | 1244 __ Comment("AllocationStubForClass return"); |
1234 // Pop result (newly allocated object). | 1245 // Pop result (newly allocated object). |
1235 __ lw(V0, Address(SP, 2 * kWordSize)); | 1246 __ lw(V0, Address(SP, 2 * kWordSize)); |
1236 __ addiu(SP, SP, Immediate(3 * kWordSize)); // Pop arguments. | 1247 __ addiu(SP, SP, Immediate(3 * kWordSize)); // Pop arguments. |
1237 // V0: new object | 1248 // V0: new object |
1238 // Restore the frame pointer and return. | 1249 // Restore the frame pointer and return. |
1239 __ LeaveStubFrameAndReturn(RA); | 1250 __ LeaveStubFrameAndReturn(RA); |
1240 *patch_code_pc_offset = assembler->CodeSize(); | |
1241 __ BranchPatchable(*StubCode::FixAllocationStubTarget_entry()); | |
1242 } | 1251 } |
1243 | 1252 |
1244 | 1253 |
1245 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function | 1254 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function |
1246 // from the entry code of a dart function after an error in passed argument | 1255 // from the entry code of a dart function after an error in passed argument |
1247 // name or number is detected. | 1256 // name or number is detected. |
1248 // Input parameters: | 1257 // Input parameters: |
1249 // RA : return address. | 1258 // RA : return address. |
1250 // SP : address of last argument. | 1259 // SP : address of last argument. |
1251 // S4: arguments descriptor array. | 1260 // S4: arguments descriptor array. |
(...skipping 299 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1551 __ CallRuntime(handle_ic_miss, num_args + 1); | 1560 __ CallRuntime(handle_ic_miss, num_args + 1); |
1552 __ Comment("NArgsCheckInlineCacheStub return"); | 1561 __ Comment("NArgsCheckInlineCacheStub return"); |
1553 // Pop returned function object into T3. | 1562 // Pop returned function object into T3. |
1554 // Restore arguments descriptor array and IC data array. | 1563 // Restore arguments descriptor array and IC data array. |
1555 __ lw(T3, Address(SP, (num_slots - 3) * kWordSize)); | 1564 __ lw(T3, Address(SP, (num_slots - 3) * kWordSize)); |
1556 __ lw(S4, Address(SP, (num_slots - 2) * kWordSize)); | 1565 __ lw(S4, Address(SP, (num_slots - 2) * kWordSize)); |
1557 __ lw(S5, Address(SP, (num_slots - 1) * kWordSize)); | 1566 __ lw(S5, Address(SP, (num_slots - 1) * kWordSize)); |
1558 // Remove the call arguments pushed earlier, including the IC data object | 1567 // Remove the call arguments pushed earlier, including the IC data object |
1559 // and the arguments descriptor array. | 1568 // and the arguments descriptor array. |
1560 __ addiu(SP, SP, Immediate(num_slots * kWordSize)); | 1569 __ addiu(SP, SP, Immediate(num_slots * kWordSize)); |
| 1570 if (range_collection_mode == kCollectRanges) { |
| 1571 __ RestoreCodePointer(); |
| 1572 } |
1561 __ LeaveStubFrame(); | 1573 __ LeaveStubFrame(); |
1562 | 1574 |
1563 Label call_target_function; | 1575 Label call_target_function; |
1564 if (!FLAG_lazy_dispatchers) { | 1576 if (!FLAG_lazy_dispatchers) { |
1565 __ mov(T0, T3); | 1577 __ mov(T0, T3); |
1566 GenerateDispatcherCode(assembler, &call_target_function); | 1578 GenerateDispatcherCode(assembler, &call_target_function); |
1567 } else { | 1579 } else { |
1568 __ b(&call_target_function); | 1580 __ b(&call_target_function); |
1569 } | 1581 } |
1570 | 1582 |
(...skipping 27 matching lines...) Expand all Loading... |
1598 __ lw(T1, Address(SP, 1 * kWordSize)); | 1610 __ lw(T1, Address(SP, 1 * kWordSize)); |
1599 } | 1611 } |
1600 __ EnterStubFrame(); | 1612 __ EnterStubFrame(); |
1601 __ addiu(SP, SP, Immediate(- frame_size * kWordSize)); | 1613 __ addiu(SP, SP, Immediate(- frame_size * kWordSize)); |
1602 __ sw(RA, Address(SP, (frame_size - 1) * kWordSize)); // Return address. | 1614 __ sw(RA, Address(SP, (frame_size - 1) * kWordSize)); // Return address. |
1603 __ sw(S5, Address(SP, (frame_size - 2) * kWordSize)); // Preserve IC data. | 1615 __ sw(S5, Address(SP, (frame_size - 2) * kWordSize)); // Preserve IC data. |
1604 __ sw(T3, Address(SP, 0 * kWordSize)); | 1616 __ sw(T3, Address(SP, 0 * kWordSize)); |
1605 if (num_args == 2) { | 1617 if (num_args == 2) { |
1606 __ sw(T1, Address(SP, 1 * kWordSize)); | 1618 __ sw(T1, Address(SP, 1 * kWordSize)); |
1607 } | 1619 } |
| 1620 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); |
1608 __ jalr(T4); | 1621 __ jalr(T4); |
1609 __ lw(S5, Address(SP, (frame_size - 2) * kWordSize)); | 1622 __ lw(S5, Address(SP, (frame_size - 2) * kWordSize)); |
1610 __ lw(RA, Address(SP, (frame_size - 1) * kWordSize)); | 1623 __ lw(RA, Address(SP, (frame_size - 1) * kWordSize)); |
1611 Label done; | 1624 Label done; |
1612 __ UpdateRangeFeedback(V0, 2, S5, T1, &done); | 1625 __ UpdateRangeFeedback(V0, 2, S5, T1, &done); |
1613 __ Bind(&done); | 1626 __ Bind(&done); |
1614 __ addiu(SP, SP, Immediate(frame_size * kWordSize)); | 1627 __ addiu(SP, SP, Immediate(frame_size * kWordSize)); |
| 1628 __ RestoreCodePointer(); |
1615 __ LeaveStubFrame(); | 1629 __ LeaveStubFrame(); |
1616 __ Ret(); | 1630 __ Ret(); |
1617 } else { | 1631 } else { |
| 1632 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); |
1618 __ jr(T4); | 1633 __ jr(T4); |
1619 } | 1634 } |
1620 | 1635 |
1621 // Call single step callback in debugger. | 1636 // Call single step callback in debugger. |
1622 if (FLAG_support_debugger && !optimized) { | 1637 if (FLAG_support_debugger && !optimized) { |
1623 __ Bind(&stepping); | 1638 __ Bind(&stepping); |
1624 __ EnterStubFrame(); | 1639 __ EnterStubFrame(); |
1625 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | 1640 __ addiu(SP, SP, Immediate(-2 * kWordSize)); |
1626 __ sw(S5, Address(SP, 1 * kWordSize)); // Preserve IC data. | 1641 __ sw(S5, Address(SP, 1 * kWordSize)); // Preserve IC data. |
1627 __ sw(RA, Address(SP, 0 * kWordSize)); // Return address. | 1642 __ sw(RA, Address(SP, 0 * kWordSize)); // Return address. |
1628 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); | 1643 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); |
1629 __ lw(RA, Address(SP, 0 * kWordSize)); | 1644 __ lw(RA, Address(SP, 0 * kWordSize)); |
1630 __ lw(S5, Address(SP, 1 * kWordSize)); | 1645 __ lw(S5, Address(SP, 1 * kWordSize)); |
1631 __ addiu(SP, SP, Immediate(2 * kWordSize)); | 1646 __ addiu(SP, SP, Immediate(2 * kWordSize)); |
| 1647 __ RestoreCodePointer(); |
1632 __ LeaveStubFrame(); | 1648 __ LeaveStubFrame(); |
1633 __ b(&done_stepping); | 1649 __ b(&done_stepping); |
1634 } | 1650 } |
1635 } | 1651 } |
1636 | 1652 |
1637 | 1653 |
1638 // Use inline cache data array to invoke the target or continue in inline | 1654 // Use inline cache data array to invoke the target or continue in inline |
1639 // cache miss handler. Stub for 1-argument check (receiver class). | 1655 // cache miss handler. Stub for 1-argument check (receiver class). |
1640 // RA: Return address. | 1656 // RA: Return address. |
1641 // S5: Inline cache data object. | 1657 // S5: Inline cache data object. |
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1767 __ LoadImmediate(T4, Smi::RawValue(Smi::kMaxValue)); | 1783 __ LoadImmediate(T4, Smi::RawValue(Smi::kMaxValue)); |
1768 __ movz(T4, T7, CMPRES1); | 1784 __ movz(T4, T7, CMPRES1); |
1769 __ sw(T4, Address(T0, count_offset)); | 1785 __ sw(T4, Address(T0, count_offset)); |
1770 } | 1786 } |
1771 | 1787 |
1772 // Load arguments descriptor into S4. | 1788 // Load arguments descriptor into S4. |
1773 __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset())); | 1789 __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset())); |
1774 | 1790 |
1775 // Get function and call it, if possible. | 1791 // Get function and call it, if possible. |
1776 __ lw(T0, Address(T0, target_offset)); | 1792 __ lw(T0, Address(T0, target_offset)); |
| 1793 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); |
1777 __ lw(T4, FieldAddress(T0, Function::entry_point_offset())); | 1794 __ lw(T4, FieldAddress(T0, Function::entry_point_offset())); |
1778 __ jr(T4); | 1795 __ jr(T4); |
1779 | 1796 |
1780 // Call single step callback in debugger. | 1797 // Call single step callback in debugger. |
1781 if (FLAG_support_debugger) { | 1798 if (FLAG_support_debugger) { |
1782 __ Bind(&stepping); | 1799 __ Bind(&stepping); |
1783 __ EnterStubFrame(); | 1800 __ EnterStubFrame(); |
1784 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | 1801 __ addiu(SP, SP, Immediate(-2 * kWordSize)); |
1785 __ sw(S5, Address(SP, 1 * kWordSize)); // Preserve IC data. | 1802 __ sw(S5, Address(SP, 1 * kWordSize)); // Preserve IC data. |
1786 __ sw(RA, Address(SP, 0 * kWordSize)); // Return address. | 1803 __ sw(RA, Address(SP, 0 * kWordSize)); // Return address. |
1787 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); | 1804 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); |
1788 __ lw(RA, Address(SP, 0 * kWordSize)); | 1805 __ lw(RA, Address(SP, 0 * kWordSize)); |
1789 __ lw(S5, Address(SP, 1 * kWordSize)); | 1806 __ lw(S5, Address(SP, 1 * kWordSize)); |
1790 __ addiu(SP, SP, Immediate(2 * kWordSize)); | 1807 __ addiu(SP, SP, Immediate(2 * kWordSize)); |
| 1808 __ RestoreCodePointer(); |
1791 __ LeaveStubFrame(); | 1809 __ LeaveStubFrame(); |
1792 __ b(&done_stepping); | 1810 __ b(&done_stepping); |
1793 } | 1811 } |
1794 } | 1812 } |
1795 | 1813 |
1796 | 1814 |
1797 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) { | 1815 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) { |
1798 GenerateUsageCounterIncrement(assembler, T0); | 1816 GenerateUsageCounterIncrement(assembler, T0); |
1799 GenerateNArgsCheckInlineCacheStub( | 1817 GenerateNArgsCheckInlineCacheStub( |
1800 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, | 1818 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, |
(...skipping 19 matching lines...) Expand all Loading... |
1820 __ sw(S5, Address(SP, 2 * kWordSize)); // Preserve IC data object. | 1838 __ sw(S5, Address(SP, 2 * kWordSize)); // Preserve IC data object. |
1821 __ sw(S4, Address(SP, 1 * kWordSize)); // Preserve args descriptor array. | 1839 __ sw(S4, Address(SP, 1 * kWordSize)); // Preserve args descriptor array. |
1822 __ sw(T0, Address(SP, 0 * kWordSize)); // Pass function. | 1840 __ sw(T0, Address(SP, 0 * kWordSize)); // Pass function. |
1823 __ CallRuntime(kCompileFunctionRuntimeEntry, 1); | 1841 __ CallRuntime(kCompileFunctionRuntimeEntry, 1); |
1824 __ lw(T0, Address(SP, 0 * kWordSize)); // Restore function. | 1842 __ lw(T0, Address(SP, 0 * kWordSize)); // Restore function. |
1825 __ lw(S4, Address(SP, 1 * kWordSize)); // Restore args descriptor array. | 1843 __ lw(S4, Address(SP, 1 * kWordSize)); // Restore args descriptor array. |
1826 __ lw(S5, Address(SP, 2 * kWordSize)); // Restore IC data array. | 1844 __ lw(S5, Address(SP, 2 * kWordSize)); // Restore IC data array. |
1827 __ addiu(SP, SP, Immediate(3 * kWordSize)); | 1845 __ addiu(SP, SP, Immediate(3 * kWordSize)); |
1828 __ LeaveStubFrame(); | 1846 __ LeaveStubFrame(); |
1829 | 1847 |
| 1848 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); |
1830 __ lw(T2, FieldAddress(T0, Function::entry_point_offset())); | 1849 __ lw(T2, FieldAddress(T0, Function::entry_point_offset())); |
1831 __ jr(T2); | 1850 __ jr(T2); |
1832 } | 1851 } |
1833 | 1852 |
1834 | 1853 |
1835 // S5: Contains an ICData. | 1854 // S5: Contains an ICData. |
1836 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) { | 1855 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) { |
1837 __ Comment("ICCallBreakpoint stub"); | 1856 __ Comment("ICCallBreakpoint stub"); |
1838 __ EnterStubFrame(); | 1857 __ EnterStubFrame(); |
1839 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | 1858 __ addiu(SP, SP, Immediate(-2 * kWordSize)); |
1840 __ sw(S5, Address(SP, 1 * kWordSize)); | 1859 __ sw(S5, Address(SP, 1 * kWordSize)); |
1841 __ LoadObject(TMP, Object::null_object()); | 1860 __ LoadObject(TMP, Object::null_object()); |
1842 __ sw(TMP, Address(SP, 0 * kWordSize)); | 1861 __ sw(TMP, Address(SP, 0 * kWordSize)); |
1843 | 1862 |
1844 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); | 1863 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); |
1845 | 1864 |
1846 __ lw(S5, Address(SP, 1 * kWordSize)); | 1865 __ lw(S5, Address(SP, 1 * kWordSize)); |
1847 __ lw(T0, Address(SP, 0 * kWordSize)); | 1866 __ lw(CODE_REG, Address(SP, 0 * kWordSize)); |
1848 __ addiu(SP, SP, Immediate(2 * kWordSize)); | 1867 __ addiu(SP, SP, Immediate(2 * kWordSize)); |
1849 __ LeaveStubFrame(); | 1868 __ LeaveStubFrame(); |
| 1869 __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); |
1850 __ jr(T0); | 1870 __ jr(T0); |
1851 } | 1871 } |
1852 | 1872 |
1853 | 1873 |
1854 void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) { | 1874 void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) { |
1855 __ Comment("RuntimeCallBreakpoint stub"); | 1875 __ Comment("RuntimeCallBreakpoint stub"); |
1856 __ EnterStubFrame(); | 1876 __ EnterStubFrame(); |
1857 __ addiu(SP, SP, Immediate(-1 * kWordSize)); | 1877 __ addiu(SP, SP, Immediate(-1 * kWordSize)); |
1858 __ LoadObject(TMP, Object::null_object()); | 1878 __ LoadObject(TMP, Object::null_object()); |
1859 __ sw(TMP, Address(SP, 0 * kWordSize)); | 1879 __ sw(TMP, Address(SP, 0 * kWordSize)); |
1860 | 1880 |
1861 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); | 1881 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); |
1862 | 1882 |
1863 __ lw(T0, Address(SP, 0 * kWordSize)); | 1883 __ lw(CODE_REG, Address(SP, 0 * kWordSize)); |
1864 __ addiu(SP, SP, Immediate(3 * kWordSize)); | 1884 __ addiu(SP, SP, Immediate(3 * kWordSize)); |
1865 __ LeaveStubFrame(); | 1885 __ LeaveStubFrame(); |
| 1886 __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); |
1866 __ jr(T0); | 1887 __ jr(T0); |
1867 } | 1888 } |
1868 | 1889 |
1869 | 1890 |
1870 // Called only from unoptimized code. All relevant registers have been saved. | 1891 // Called only from unoptimized code. All relevant registers have been saved. |
1871 // RA: return address. | 1892 // RA: return address. |
1872 void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) { | 1893 void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) { |
1873 // Check single stepping. | 1894 // Check single stepping. |
1874 Label stepping, done_stepping; | 1895 Label stepping, done_stepping; |
1875 __ LoadIsolate(T0); | 1896 __ LoadIsolate(T0); |
(...skipping 170 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2046 __ Comment("OptimizeFunctionStub"); | 2067 __ Comment("OptimizeFunctionStub"); |
2047 __ EnterStubFrame(); | 2068 __ EnterStubFrame(); |
2048 __ addiu(SP, SP, Immediate(-3 * kWordSize)); | 2069 __ addiu(SP, SP, Immediate(-3 * kWordSize)); |
2049 __ sw(S4, Address(SP, 2 * kWordSize)); | 2070 __ sw(S4, Address(SP, 2 * kWordSize)); |
2050 // Setup space on stack for return value. | 2071 // Setup space on stack for return value. |
2051 __ LoadObject(TMP, Object::null_object()); | 2072 __ LoadObject(TMP, Object::null_object()); |
2052 __ sw(TMP, Address(SP, 1 * kWordSize)); | 2073 __ sw(TMP, Address(SP, 1 * kWordSize)); |
2053 __ sw(T0, Address(SP, 0 * kWordSize)); | 2074 __ sw(T0, Address(SP, 0 * kWordSize)); |
2054 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1); | 2075 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1); |
2055 __ Comment("OptimizeFunctionStub return"); | 2076 __ Comment("OptimizeFunctionStub return"); |
2056 __ lw(T0, Address(SP, 1 * kWordSize)); // Get Code object | 2077 __ lw(CODE_REG, Address(SP, 1 * kWordSize)); // Get Code object |
2057 __ lw(S4, Address(SP, 2 * kWordSize)); // Restore argument descriptor. | 2078 __ lw(S4, Address(SP, 2 * kWordSize)); // Restore argument descriptor. |
2058 __ addiu(SP, SP, Immediate(3 * kWordSize)); // Discard argument. | 2079 __ addiu(SP, SP, Immediate(3 * kWordSize)); // Discard argument. |
2059 | 2080 |
2060 __ lw(T0, FieldAddress(T0, Code::entry_point_offset())); | 2081 __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); |
2061 __ LeaveStubFrameAndReturn(T0); | 2082 __ LeaveStubFrameAndReturn(T0); |
2062 __ break_(0); | 2083 __ break_(0); |
2063 } | 2084 } |
2064 | 2085 |
2065 | 2086 |
2066 // Does identical check (object references are equal or not equal) with special | 2087 // Does identical check (object references are equal or not equal) with special |
2067 // checks for boxed numbers. | 2088 // checks for boxed numbers. |
2068 // Returns: CMPRES1 is zero if equal, non-zero otherwise. | 2089 // Returns: CMPRES1 is zero if equal, non-zero otherwise. |
2069 // Note: A Mint cannot contain a value that would fit in Smi, a Bigint | 2090 // Note: A Mint cannot contain a value that would fit in Smi, a Bigint |
2070 // cannot contain a value that fits in Mint or Smi. | 2091 // cannot contain a value that fits in Mint or Smi. |
(...skipping 100 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2171 | 2192 |
2172 // Call single step callback in debugger. | 2193 // Call single step callback in debugger. |
2173 if (FLAG_support_debugger) { | 2194 if (FLAG_support_debugger) { |
2174 __ Bind(&stepping); | 2195 __ Bind(&stepping); |
2175 __ EnterStubFrame(); | 2196 __ EnterStubFrame(); |
2176 __ addiu(SP, SP, Immediate(-1 * kWordSize)); | 2197 __ addiu(SP, SP, Immediate(-1 * kWordSize)); |
2177 __ sw(RA, Address(SP, 0 * kWordSize)); // Return address. | 2198 __ sw(RA, Address(SP, 0 * kWordSize)); // Return address. |
2178 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); | 2199 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); |
2179 __ lw(RA, Address(SP, 0 * kWordSize)); | 2200 __ lw(RA, Address(SP, 0 * kWordSize)); |
2180 __ addiu(SP, SP, Immediate(1 * kWordSize)); | 2201 __ addiu(SP, SP, Immediate(1 * kWordSize)); |
| 2202 __ RestoreCodePointer(); |
2181 __ LeaveStubFrame(); | 2203 __ LeaveStubFrame(); |
2182 __ b(&done_stepping); | 2204 __ b(&done_stepping); |
2183 } | 2205 } |
2184 } | 2206 } |
2185 | 2207 |
2186 | 2208 |
2187 // Called from optimized code only. | 2209 // Called from optimized code only. |
2188 // SP + 4: left operand. | 2210 // SP + 4: left operand. |
2189 // SP + 0: right operand. | 2211 // SP + 0: right operand. |
2190 // Returns: CMPRES1 is zero if equal, non-zero otherwise. | 2212 // Returns: CMPRES1 is zero if equal, non-zero otherwise. |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2231 | 2253 |
2232 __ Bind(&call_target_function); | 2254 __ Bind(&call_target_function); |
2233 // Call the target found in the cache. For a class id match, this is a | 2255 // Call the target found in the cache. For a class id match, this is a |
2234 // proper target for the given name and arguments descriptor. If the | 2256 // proper target for the given name and arguments descriptor. If the |
2235 // illegal class id was found, the target is a cache miss handler that can | 2257 // illegal class id was found, the target is a cache miss handler that can |
2236 // be invoked as a normal Dart function. | 2258 // be invoked as a normal Dart function. |
2237 __ sll(T1, T3, 2); | 2259 __ sll(T1, T3, 2); |
2238 __ addu(T1, T2, T1); | 2260 __ addu(T1, T2, T1); |
2239 __ lw(T0, FieldAddress(T1, base + kWordSize)); | 2261 __ lw(T0, FieldAddress(T1, base + kWordSize)); |
2240 | 2262 |
| 2263 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); |
2241 __ lw(target, FieldAddress(T0, Function::entry_point_offset())); | 2264 __ lw(target, FieldAddress(T0, Function::entry_point_offset())); |
2242 } | 2265 } |
2243 | 2266 |
2244 | 2267 |
// Called from megamorphic calls.
//  T0: receiver.
//  T1: lookup cache.
// Result:
//  T1: entry point.
void StubCode::GenerateMegamorphicLookupStub(Assembler* assembler) {
  // Delegate the cache probe to the shared helper: receiver in T0, cache in
  // T1, and the resolved entry point is written back into T1 (the same
  // register), then return to the call site which performs the actual call.
  EmitMegamorphicLookup(assembler, T0, T1, T1);
  __ Ret();
}
2254 | 2277 |
2255 } // namespace dart | 2278 } // namespace dart |
2256 | 2279 |
2257 #endif // defined TARGET_ARCH_MIPS | 2280 #endif // defined TARGET_ARCH_MIPS |
OLD | NEW |