Chromium Code Reviews

Side by Side Diff: src/mips/code-stubs-mips.cc

Issue 9699071: MIPS: Branch delay slot and other optimizations. (Closed)
Patch Set: Created 8 years, 9 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 52 matching lines...)
63 __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset)); 63 __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
64 __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex); 64 __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
65 __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2)); 65 __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
66 } 66 }
67 67
68 68
69 void ToNumberStub::Generate(MacroAssembler* masm) { 69 void ToNumberStub::Generate(MacroAssembler* masm) {
70 // The ToNumber stub takes one argument in a0. 70 // The ToNumber stub takes one argument in a0.
71 Label check_heap_number, call_builtin; 71 Label check_heap_number, call_builtin;
72 __ JumpIfNotSmi(a0, &check_heap_number); 72 __ JumpIfNotSmi(a0, &check_heap_number);
73 __ Ret(USE_DELAY_SLOT);
73 __ mov(v0, a0); 74 __ mov(v0, a0);
74 __ Ret();
75 75
76 __ bind(&check_heap_number); 76 __ bind(&check_heap_number);
77 EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin); 77 EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
78 __ Ret(USE_DELAY_SLOT);
78 __ mov(v0, a0); 79 __ mov(v0, a0);
79 __ Ret();
80 80
81 __ bind(&call_builtin); 81 __ bind(&call_builtin);
82 __ push(a0); 82 __ push(a0);
83 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); 83 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
84 } 84 }
85 85
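Reviewer note (not part of the change): the recurring transformation in this patch, first visible in ToNumberStub above, is to fold the instruction that sets up the return value into the branch delay slot of the return. On MIPS the instruction that follows a jump executes before the jump takes effect, so Ret(USE_DELAY_SLOT) lets the next instruction do useful work where the assembler would otherwise pad with a nop. An illustrative sketch of the before/after pattern, using the same MacroAssembler calls as the diff:

    // Before: the return value is set up first and the delay slot is wasted.
    __ mov(v0, a0);          // Move the argument into the result register.
    __ Ret();                // Emits jr ra plus an automatic nop in the delay slot.

    // After: one instruction shorter, the move executes in the delay slot.
    __ Ret(USE_DELAY_SLOT);  // Emits jr ra and leaves the delay slot to the caller.
    __ mov(v0, a0);          // Executes before control transfers back to ra.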
86 86
87 void FastNewClosureStub::Generate(MacroAssembler* masm) { 87 void FastNewClosureStub::Generate(MacroAssembler* masm) {
88 // Create a new closure from the given function info in new 88 // Create a new closure from the given function info in new
89 // space. Set the context to the current context in cp. 89 // space. Set the context to the current context in cp.
(...skipping 31 matching lines...)
121 __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset)); 121 __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
122 __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset)); 122 __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
123 __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset)); 123 __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
124 __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset)); 124 __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
125 __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset)); 125 __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
126 126
127 // Initialize the code pointer in the function to be the one 127 // Initialize the code pointer in the function to be the one
128 // found in the shared function info object. 128 // found in the shared function info object.
129 __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset)); 129 __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
130 __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag)); 130 __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
131 __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
132 131
133 // Return result. The argument function info has been popped already. 132 // Return result. The argument function info has been popped already.
133 __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
134 __ Ret(); 134 __ Ret();
135 135
136 // Create a new closure through the slower runtime call. 136 // Create a new closure through the slower runtime call.
137 __ bind(&gc); 137 __ bind(&gc);
138 __ LoadRoot(t0, Heap::kFalseValueRootIndex); 138 __ LoadRoot(t0, Heap::kFalseValueRootIndex);
139 __ Push(cp, a3, t0); 139 __ Push(cp, a3, t0);
140 __ TailCallRuntime(Runtime::kNewClosure, 3, 1); 140 __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
141 } 141 }
142 142
143 143
(...skipping 28 matching lines...)
172 __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX))); 172 __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
173 173
174 // Initialize the rest of the slots to undefined. 174 // Initialize the rest of the slots to undefined.
175 __ LoadRoot(a1, Heap::kUndefinedValueRootIndex); 175 __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
176 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { 176 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
177 __ sw(a1, MemOperand(v0, Context::SlotOffset(i))); 177 __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
178 } 178 }
179 179
180 // Remove the on-stack argument and return. 180 // Remove the on-stack argument and return.
181 __ mov(cp, v0); 181 __ mov(cp, v0);
182 __ Pop(); 182 __ DropAndRet(1);
183 __ Ret();
184 183
185 // Need to collect. Call into runtime system. 184 // Need to collect. Call into runtime system.
186 __ bind(&gc); 185 __ bind(&gc);
187 __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1); 186 __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
188 } 187 }
189 188
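Reviewer note: several return sequences above are also collapsed into DropAndRet(n), replacing an explicit stack adjustment followed by Ret(). The stubs only rely on the helper popping n stack slots and returning; a plausible reading, consistent with the delay-slot theme of this patch, is that the helper folds the sp adjustment into the return's delay slot (the actual definition lives in the MIPS macro assembler, not in this file):

    // What the old code spelled out, e.g. in FastNewFunctionContextStub:
    __ Pop();                            // Remove the on-stack argument.
    __ Ret();
    // What DropAndRet(1) can emit instead, assuming it uses the delay slot:
    __ Ret(USE_DELAY_SLOT);
    __ addiu(sp, sp, 1 * kPointerSize);  // Drop one slot while returning.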
190 189
191 void FastNewBlockContextStub::Generate(MacroAssembler* masm) { 190 void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
192 // Stack layout on entry: 191 // Stack layout on entry:
193 // 192 //
(...skipping 41 matching lines...)
235 __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX)); 234 __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));
236 235
237 // Initialize the rest of the slots to the hole value. 236 // Initialize the rest of the slots to the hole value.
238 __ LoadRoot(a1, Heap::kTheHoleValueRootIndex); 237 __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
239 for (int i = 0; i < slots_; i++) { 238 for (int i = 0; i < slots_; i++) {
240 __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS)); 239 __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
241 } 240 }
242 241
243 // Remove the on-stack argument and return. 242 // Remove the on-stack argument and return.
244 __ mov(cp, v0); 243 __ mov(cp, v0);
245 __ Addu(sp, sp, Operand(2 * kPointerSize)); 244 __ DropAndRet(2);
246 __ Ret();
247 245
248 // Need to collect. Call into runtime system. 246 // Need to collect. Call into runtime system.
249 __ bind(&gc); 247 __ bind(&gc);
250 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); 248 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
251 } 249 }
252 250
253 251
254 static void GenerateFastCloneShallowArrayCommon( 252 static void GenerateFastCloneShallowArrayCommon(
255 MacroAssembler* masm, 253 MacroAssembler* masm,
256 int length, 254 int length,
(...skipping 104 matching lines...)
361 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset)); 359 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
362 __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset)); 360 __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
363 __ LoadRoot(at, expected_map_index); 361 __ LoadRoot(at, expected_map_index);
364 __ Assert(eq, message, a3, Operand(at)); 362 __ Assert(eq, message, a3, Operand(at));
365 __ pop(a3); 363 __ pop(a3);
366 } 364 }
367 365
368 GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case); 366 GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
369 367
370 // Return and remove the on-stack parameters. 368 // Return and remove the on-stack parameters.
371 __ Addu(sp, sp, Operand(3 * kPointerSize)); 369 __ DropAndRet(3);
372 __ Ret();
373 370
374 __ bind(&slow_case); 371 __ bind(&slow_case);
375 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); 372 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
376 } 373 }
377 374
378 375
379 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) { 376 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
380 // Stack layout on entry: 377 // Stack layout on entry:
381 // 378 //
382 // [sp]: object literal flags. 379 // [sp]: object literal flags.
(...skipping 15 matching lines...)
398 395
399 // Check that the boilerplate contains only fast properties and we can 396 // Check that the boilerplate contains only fast properties and we can
400 // statically determine the instance size. 397 // statically determine the instance size.
401 int size = JSObject::kHeaderSize + length_ * kPointerSize; 398 int size = JSObject::kHeaderSize + length_ * kPointerSize;
402 __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset)); 399 __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
403 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset)); 400 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
404 __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2)); 401 __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
405 402
406 // Allocate the JS object and copy header together with all in-object 403 // Allocate the JS object and copy header together with all in-object
407 // properties from the boilerplate. 404 // properties from the boilerplate.
408 __ AllocateInNewSpace(size, a0, a1, a2, &slow_case, TAG_OBJECT); 405 __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
409 for (int i = 0; i < size; i += kPointerSize) { 406 for (int i = 0; i < size; i += kPointerSize) {
410 __ lw(a1, FieldMemOperand(a3, i)); 407 __ lw(a1, FieldMemOperand(a3, i));
411 __ sw(a1, FieldMemOperand(a0, i)); 408 __ sw(a1, FieldMemOperand(v0, i));
412 } 409 }
413 410
414 // Return and remove the on-stack parameters. 411 // Return and remove the on-stack parameters.
415 __ Drop(4); 412 __ DropAndRet(4);
416 __ Ret(USE_DELAY_SLOT);
417 __ mov(v0, a0);
418 413
419 __ bind(&slow_case); 414 __ bind(&slow_case);
420 __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1); 415 __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
421 } 416 }
422 417
423 418
424 // Takes a Smi and converts to an IEEE 64 bit floating point value in two 419 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
425 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and 420 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
426 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a 421 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
427 // scratch register. Destroys the source register. No GC occurs during this 422 // scratch register. Destroys the source register. No GC occurs during this
(...skipping 57 matching lines...)
485 // greater than 1 (not a special case) or less than 1 (special case of 0). 480 // greater than 1 (not a special case) or less than 1 (special case of 0).
486 __ Branch(&not_special, gt, source_, Operand(1)); 481 __ Branch(&not_special, gt, source_, Operand(1));
487 482
488 // For 1 or -1 we need to or in the 0 exponent (biased to 1023). 483 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
489 static const uint32_t exponent_word_for_1 = 484 static const uint32_t exponent_word_for_1 =
490 HeapNumber::kExponentBias << HeapNumber::kExponentShift; 485 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
491 // Safe to use 'at' as dest reg here. 486 // Safe to use 'at' as dest reg here.
492 __ Or(at, exponent, Operand(exponent_word_for_1)); 487 __ Or(at, exponent, Operand(exponent_word_for_1));
493 __ Movn(exponent, at, source_); // Write exp when source not 0. 488 __ Movn(exponent, at, source_); // Write exp when source not 0.
494 // 1, 0 and -1 all have 0 for the second word. 489 // 1, 0 and -1 all have 0 for the second word.
490 __ Ret(USE_DELAY_SLOT);
495 __ mov(mantissa, zero_reg); 491 __ mov(mantissa, zero_reg);
496 __ Ret();
497 492
498 __ bind(&not_special); 493 __ bind(&not_special);
499 // Count leading zeros. 494 // Count leading zeros.
500 // Gets the wrong answer for 0, but we already checked for that case above. 495 // Gets the wrong answer for 0, but we already checked for that case above.
501 __ Clz(zeros_, source_); 496 __ Clz(zeros_, source_);
502 // Compute exponent and or it into the exponent register. 497 // Compute exponent and or it into the exponent register.
503 // We use mantissa as a scratch register here. 498 // We use mantissa as a scratch register here.
504 __ li(mantissa, Operand(31 + HeapNumber::kExponentBias)); 499 __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
505 __ subu(mantissa, mantissa, zeros_); 500 __ subu(mantissa, mantissa, zeros_);
506 __ sll(mantissa, mantissa, HeapNumber::kExponentShift); 501 __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
507 __ Or(exponent, exponent, mantissa); 502 __ Or(exponent, exponent, mantissa);
508 503
509 // Shift up the source chopping the top bit off. 504 // Shift up the source chopping the top bit off.
510 __ Addu(zeros_, zeros_, Operand(1)); 505 __ Addu(zeros_, zeros_, Operand(1));
511 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. 506 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
512 __ sllv(source_, source_, zeros_); 507 __ sllv(source_, source_, zeros_);
513 // Compute lower part of fraction (last 12 bits). 508 // Compute lower part of fraction (last 12 bits).
514 __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord); 509 __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
515 // And the top (top 20 bits). 510 // And the top (top 20 bits).
516 __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord); 511 __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
512
513 __ Ret(USE_DELAY_SLOT);
517 __ or_(exponent, exponent, source_); 514 __ or_(exponent, exponent, source_);
518
519 __ Ret();
520 } 515 }
521 516
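Reviewer note: the comment above ConvertToDoubleStub spells out the target layout — 1 sign bit, 11 exponent bits biased by 1023, and 52 fraction bits split 20/32 across the exponent word and the mantissa word. A small host-side C++ check of that layout for one sample value (5, an arbitrary untagged smi; illustrative only, not part of the stub):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = 5.0;  // The untagged integer the stub would convert.
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      // High word: sign bit, 11 exponent bits, top 20 mantissa bits.
      uint32_t exponent_word = static_cast<uint32_t>(bits >> 32);
      // Low word: the remaining 32 mantissa bits.
      uint32_t mantissa_word = static_cast<uint32_t>(bits);
      // Prints 0x40140000 0x00000000: exponent 1025 (= 1023 + 2), fraction .01b.
      std::printf("0x%08x 0x%08x\n", exponent_word, mantissa_word);
      return 0;
    }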
522 517
523 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, 518 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
524 FloatingPointHelper::Destination destination, 519 FloatingPointHelper::Destination destination,
525 Register scratch1, 520 Register scratch1,
526 Register scratch2) { 521 Register scratch2) {
527 if (CpuFeatures::IsSupported(FPU)) { 522 if (CpuFeatures::IsSupported(FPU)) {
528 CpuFeatures::Scope scope(FPU); 523 CpuFeatures::Scope scope(FPU);
529 __ sra(scratch1, a0, kSmiTagSize); 524 __ sra(scratch1, a0, kSmiTagSize);
(...skipping 488 matching lines...)
1018 if (!IsMipsSoftFloatABI) { 1013 if (!IsMipsSoftFloatABI) {
1019 CpuFeatures::Scope scope(FPU); 1014 CpuFeatures::Scope scope(FPU);
1020 // Double returned in register f0. 1015 // Double returned in register f0.
1021 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); 1016 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
1022 } else { 1017 } else {
1023 // Double returned in registers v0 and v1. 1018 // Double returned in registers v0 and v1.
1024 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); 1019 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
1025 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); 1020 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
1026 } 1021 }
1027 // Place heap_number_result in v0 and return to the pushed return address. 1022 // Place heap_number_result in v0 and return to the pushed return address.
1023 __ pop(ra);
1024 __ Ret(USE_DELAY_SLOT);
1028 __ mov(v0, heap_number_result); 1025 __ mov(v0, heap_number_result);
1029 __ pop(ra);
1030 __ Ret();
1031 } 1026 }
1032 1027
1033 1028
1034 bool WriteInt32ToHeapNumberStub::IsPregenerated() { 1029 bool WriteInt32ToHeapNumberStub::IsPregenerated() {
1035 // These variants are compiled ahead of time. See next method. 1030 // These variants are compiled ahead of time. See next method.
1036 if (the_int_.is(a1) && 1031 if (the_int_.is(a1) &&
1037 the_heap_number_.is(v0) && 1032 the_heap_number_.is(v0) &&
1038 scratch_.is(a2) && 1033 scratch_.is(a2) &&
1039 sign_.is(a3)) { 1034 sign_.is(a3)) {
1040 return true; 1035 return true;
(...skipping 115 matching lines...)
1156 // undefined >= undefined should fail. 1151 // undefined >= undefined should fail.
1157 __ li(v0, Operand(LESS)); 1152 __ li(v0, Operand(LESS));
1158 } 1153 }
1159 __ Ret(); 1154 __ Ret();
1160 } 1155 }
1161 } 1156 }
1162 } 1157 }
1163 } 1158 }
1164 1159
1165 __ bind(&return_equal); 1160 __ bind(&return_equal);
1161
1166 if (cc == less) { 1162 if (cc == less) {
1167 __ li(v0, Operand(GREATER)); // Things aren't less than themselves. 1163 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
1168 } else if (cc == greater) { 1164 } else if (cc == greater) {
1169 __ li(v0, Operand(LESS)); // Things aren't greater than themselves. 1165 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
1170 } else { 1166 } else {
1171 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. 1167 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
1172 } 1168 }
1173 __ Ret(); 1169 __ Ret();
1174 1170
1175 if (cc != eq || !never_nan_nan) { 1171 if (cc != eq || !never_nan_nan) {
(...skipping 51 matching lines...)
1227 (lhs.is(a1) && rhs.is(a0))); 1223 (lhs.is(a1) && rhs.is(a0)));
1228 1224
1229 Label lhs_is_smi; 1225 Label lhs_is_smi;
1230 __ JumpIfSmi(lhs, &lhs_is_smi); 1226 __ JumpIfSmi(lhs, &lhs_is_smi);
1231 // Rhs is a Smi. 1227 // Rhs is a Smi.
1232 // Check whether the non-smi is a heap number. 1228 // Check whether the non-smi is a heap number.
1233 __ GetObjectType(lhs, t4, t4); 1229 __ GetObjectType(lhs, t4, t4);
1234 if (strict) { 1230 if (strict) {
1235 // If lhs was not a number and rhs was a Smi then strict equality cannot 1231 // If lhs was not a number and rhs was a Smi then strict equality cannot
1236 // succeed. Return non-equal (lhs is already not zero). 1232 // succeed. Return non-equal (lhs is already not zero).
1233 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
1237 __ mov(v0, lhs); 1234 __ mov(v0, lhs);
1238 __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
1239 } else { 1235 } else {
1240 // Smi compared non-strictly with a non-Smi non-heap-number. Call 1236 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1241 // the runtime. 1237 // the runtime.
1242 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); 1238 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1243 } 1239 }
1244 1240
1245 // Rhs is a smi, lhs is a number. 1241 // Rhs is a smi, lhs is a number.
1246 // Convert smi rhs to double. 1242 // Convert smi rhs to double.
1247 if (CpuFeatures::IsSupported(FPU)) { 1243 if (CpuFeatures::IsSupported(FPU)) {
1248 CpuFeatures::Scope scope(FPU); 1244 CpuFeatures::Scope scope(FPU);
(...skipping 17 matching lines...)
1266 1262
1267 // We now have both loaded as doubles. 1263 // We now have both loaded as doubles.
1268 __ jmp(both_loaded_as_doubles); 1264 __ jmp(both_loaded_as_doubles);
1269 1265
1270 __ bind(&lhs_is_smi); 1266 __ bind(&lhs_is_smi);
1271 // Lhs is a Smi. Check whether the non-smi is a heap number. 1267 // Lhs is a Smi. Check whether the non-smi is a heap number.
1272 __ GetObjectType(rhs, t4, t4); 1268 __ GetObjectType(rhs, t4, t4);
1273 if (strict) { 1269 if (strict) {
1274 // If lhs was not a number and rhs was a Smi then strict equality cannot 1270 // If lhs was not a number and rhs was a Smi then strict equality cannot
1275 // succeed. Return non-equal. 1271 // succeed. Return non-equal.
1272 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
1276 __ li(v0, Operand(1)); 1273 __ li(v0, Operand(1));
1277 __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
1278 } else { 1274 } else {
1279 // Smi compared non-strictly with a non-Smi non-heap-number. Call 1275 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1280 // the runtime. 1276 // the runtime.
1281 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); 1277 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1282 } 1278 }
1283 1279
1284 // Lhs is a smi, rhs is a number. 1280 // Lhs is a smi, rhs is a number.
1285 // Convert smi lhs to double. 1281 // Convert smi lhs to double.
1286 if (CpuFeatures::IsSupported(FPU)) { 1282 if (CpuFeatures::IsSupported(FPU)) {
1287 CpuFeatures::Scope scope(FPU); 1283 CpuFeatures::Scope scope(FPU);
(...skipping 59 matching lines...)
1347 __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg)); 1343 __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
1348 1344
1349 __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord); 1345 __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1350 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg)); 1346 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1351 1347
1352 __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg)); 1348 __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
1353 1349
1354 __ bind(&one_is_nan); 1350 __ bind(&one_is_nan);
1355 // NaN comparisons always fail. 1351 // NaN comparisons always fail.
1356 // Load whatever we need in v0 to make the comparison fail. 1352 // Load whatever we need in v0 to make the comparison fail.
1353
1357 if (cc == lt || cc == le) { 1354 if (cc == lt || cc == le) {
1358 __ li(v0, Operand(GREATER)); 1355 __ li(v0, Operand(GREATER));
1359 } else { 1356 } else {
1360 __ li(v0, Operand(LESS)); 1357 __ li(v0, Operand(LESS));
1361 } 1358 }
1362 __ Ret(); // Return. 1359 __ Ret();
1363 1360
1364 __ bind(&neither_is_nan); 1361 __ bind(&neither_is_nan);
1365 } 1362 }
1366 1363
1367 1364
1368 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { 1365 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
1369 // f12 and f14 have the two doubles. Neither is a NaN. 1366 // f12 and f14 have the two doubles. Neither is a NaN.
1370 // Call a native function to do a comparison between two non-NaNs. 1367 // Call a native function to do a comparison between two non-NaNs.
1371 // Call C routine that may not cause GC or other trouble. 1368 // Call C routine that may not cause GC or other trouble.
1372 // We use a call_was and return manually because we need arguments slots to 1369 // We use a call_was and return manually because we need arguments slots to
(...skipping 28 matching lines...)
1401 __ Branch(&return_result_equal, eq, v0, Operand(zero_reg)); 1398 __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
1402 // 0, -0 case. 1399 // 0, -0 case.
1403 __ sll(rhs_exponent, rhs_exponent, kSmiTagSize); 1400 __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
1404 __ sll(lhs_exponent, lhs_exponent, kSmiTagSize); 1401 __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
1405 __ or_(t4, rhs_exponent, lhs_exponent); 1402 __ or_(t4, rhs_exponent, lhs_exponent);
1406 __ or_(t4, t4, rhs_mantissa); 1403 __ or_(t4, t4, rhs_mantissa);
1407 1404
1408 __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg)); 1405 __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
1409 1406
1410 __ bind(&return_result_equal); 1407 __ bind(&return_result_equal);
1408
1411 __ li(v0, Operand(EQUAL)); 1409 __ li(v0, Operand(EQUAL));
1412 __ Ret(); 1410 __ Ret();
1413 } 1411 }
1414 1412
1415 __ bind(&return_result_not_equal); 1413 __ bind(&return_result_not_equal);
1416 1414
1417 if (!CpuFeatures::IsSupported(FPU)) { 1415 if (!CpuFeatures::IsSupported(FPU)) {
1418 __ push(ra); 1416 __ push(ra);
1419 __ PrepareCallCFunction(0, 2, t4); 1417 __ PrepareCallCFunction(0, 2, t4);
1420 if (!IsMipsSoftFloatABI) { 1418 if (!IsMipsSoftFloatABI) {
(...skipping 11 matching lines...)
1432 0, 2); 1430 0, 2);
1433 __ pop(ra); // Because this function returns int, result is in v0. 1431 __ pop(ra); // Because this function returns int, result is in v0.
1434 __ Ret(); 1432 __ Ret();
1435 } else { 1433 } else {
1436 CpuFeatures::Scope scope(FPU); 1434 CpuFeatures::Scope scope(FPU);
1437 Label equal, less_than; 1435 Label equal, less_than;
1438 __ BranchF(&equal, NULL, eq, f12, f14); 1436 __ BranchF(&equal, NULL, eq, f12, f14);
1439 __ BranchF(&less_than, NULL, lt, f12, f14); 1437 __ BranchF(&less_than, NULL, lt, f12, f14);
1440 1438
1441 // Not equal, not less, not NaN, must be greater. 1439 // Not equal, not less, not NaN, must be greater.
1440
1442 __ li(v0, Operand(GREATER)); 1441 __ li(v0, Operand(GREATER));
1443 __ Ret(); 1442 __ Ret();
1444 1443
1445 __ bind(&equal); 1444 __ bind(&equal);
1446 __ li(v0, Operand(EQUAL)); 1445 __ li(v0, Operand(EQUAL));
1447 __ Ret(); 1446 __ Ret();
1448 1447
1449 __ bind(&less_than); 1448 __ bind(&less_than);
1450 __ li(v0, Operand(LESS)); 1449 __ li(v0, Operand(LESS));
1451 __ Ret(); 1450 __ Ret();
(...skipping 10 matching lines...)
1462 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); 1461 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
1463 Label first_non_object; 1462 Label first_non_object;
1464 // Get the type of the first operand into a2 and compare it with 1463 // Get the type of the first operand into a2 and compare it with
1465 // FIRST_SPEC_OBJECT_TYPE. 1464 // FIRST_SPEC_OBJECT_TYPE.
1466 __ GetObjectType(lhs, a2, a2); 1465 __ GetObjectType(lhs, a2, a2);
1467 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE)); 1466 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1468 1467
1469 // Return non-zero. 1468 // Return non-zero.
1470 Label return_not_equal; 1469 Label return_not_equal;
1471 __ bind(&return_not_equal); 1470 __ bind(&return_not_equal);
1471 __ Ret(USE_DELAY_SLOT);
1472 __ li(v0, Operand(1)); 1472 __ li(v0, Operand(1));
1473 __ Ret();
1474 1473
1475 __ bind(&first_non_object); 1474 __ bind(&first_non_object);
1476 // Check for oddballs: true, false, null, undefined. 1475 // Check for oddballs: true, false, null, undefined.
1477 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE)); 1476 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
1478 1477
1479 __ GetObjectType(rhs, a3, a3); 1478 __ GetObjectType(rhs, a3, a3);
1480 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); 1479 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1481 1480
1482 // Check for oddballs: true, false, null, undefined. 1481 // Check for oddballs: true, false, null, undefined.
1483 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE)); 1482 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
(...skipping 58 matching lines...)
1542 __ Branch(&object_test, ne, at, Operand(zero_reg)); 1541 __ Branch(&object_test, ne, at, Operand(zero_reg));
1543 __ And(at, a2, Operand(kIsSymbolMask)); 1542 __ And(at, a2, Operand(kIsSymbolMask));
1544 __ Branch(possible_strings, eq, at, Operand(zero_reg)); 1543 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1545 __ GetObjectType(rhs, a3, a3); 1544 __ GetObjectType(rhs, a3, a3);
1546 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE)); 1545 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1547 __ And(at, a3, Operand(kIsSymbolMask)); 1546 __ And(at, a3, Operand(kIsSymbolMask));
1548 __ Branch(possible_strings, eq, at, Operand(zero_reg)); 1547 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1549 1548
1550 // Both are symbols. We already checked they weren't the same pointer 1549 // Both are symbols. We already checked they weren't the same pointer
1551 // so they are not equal. 1550 // so they are not equal.
1551 __ Ret(USE_DELAY_SLOT);
1552 __ li(v0, Operand(1)); // Non-zero indicates not equal. 1552 __ li(v0, Operand(1)); // Non-zero indicates not equal.
1553 __ Ret();
1554 1553
1555 __ bind(&object_test); 1554 __ bind(&object_test);
1556 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE)); 1555 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1557 __ GetObjectType(rhs, a2, a3); 1556 __ GetObjectType(rhs, a2, a3);
1558 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); 1557 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1559 1558
1560 // If both objects are undetectable, they are equal. Otherwise, they 1559 // If both objects are undetectable, they are equal. Otherwise, they
1561 // are not equal, since they are different objects and an object is not 1560 // are not equal, since they are different objects and an object is not
1562 // equal to undefined. 1561 // equal to undefined.
1563 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset)); 1562 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1564 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset)); 1563 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1565 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset)); 1564 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1566 __ and_(a0, a2, a3); 1565 __ and_(a0, a2, a3);
1567 __ And(a0, a0, Operand(1 << Map::kIsUndetectable)); 1566 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1568 __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable)); 1567 __ Ret(USE_DELAY_SLOT);
1569 __ Ret(); 1568 __ xori(v0, a0, 1 << Map::kIsUndetectable);
1570 } 1569 }
1571 1570
1572 1571
1573 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, 1572 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1574 Register object, 1573 Register object,
1575 Register result, 1574 Register result,
1576 Register scratch1, 1575 Register scratch1,
1577 Register scratch2, 1576 Register scratch2,
1578 Register scratch3, 1577 Register scratch3,
1579 bool object_is_smi, 1578 bool object_is_smi,
(...skipping 86 matching lines...)
1666 } 1665 }
1667 1666
1668 1667
1669 void NumberToStringStub::Generate(MacroAssembler* masm) { 1668 void NumberToStringStub::Generate(MacroAssembler* masm) {
1670 Label runtime; 1669 Label runtime;
1671 1670
1672 __ lw(a1, MemOperand(sp, 0)); 1671 __ lw(a1, MemOperand(sp, 0));
1673 1672
1674 // Generate code to lookup number in the number string cache. 1673 // Generate code to lookup number in the number string cache.
1675 GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime); 1674 GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
1676 __ Addu(sp, sp, Operand(1 * kPointerSize)); 1675 __ DropAndRet(1);
1677 __ Ret();
1678 1676
1679 __ bind(&runtime); 1677 __ bind(&runtime);
1680 // Handle number to string in the runtime system if not found in the cache. 1678 // Handle number to string in the runtime system if not found in the cache.
1681 __ TailCallRuntime(Runtime::kNumberToString, 1, 1); 1679 __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
1682 } 1680 }
1683 1681
1684 1682
1685 // On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared. 1683 // On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
1686 // On exit, v0 is 0, positive, or negative (smi) to indicate the result 1684 // On exit, v0 is 0, positive, or negative (smi) to indicate the result
1687 // of the comparison. 1685 // of the comparison.
1688 void CompareStub::Generate(MacroAssembler* masm) { 1686 void CompareStub::Generate(MacroAssembler* masm) {
1689 Label slow; // Call builtin. 1687 Label slow; // Call builtin.
1690 Label not_smis, both_loaded_as_doubles; 1688 Label not_smis, both_loaded_as_doubles;
1691 1689
1692 1690
1693 if (include_smi_compare_) { 1691 if (include_smi_compare_) {
1694 Label not_two_smis, smi_done; 1692 Label not_two_smis, smi_done;
1695 __ Or(a2, a1, a0); 1693 __ Or(a2, a1, a0);
1696 __ JumpIfNotSmi(a2, &not_two_smis); 1694 __ JumpIfNotSmi(a2, &not_two_smis);
1697 __ sra(a1, a1, 1); 1695 __ sra(a1, a1, 1);
1698 __ sra(a0, a0, 1); 1696 __ sra(a0, a0, 1);
1699 __ Subu(v0, a1, a0); 1697 __ Ret(USE_DELAY_SLOT);
1700 __ Ret(); 1698 __ subu(v0, a1, a0);
1701 __ bind(&not_two_smis); 1699 __ bind(&not_two_smis);
1702 } else if (FLAG_debug_code) { 1700 } else if (FLAG_debug_code) {
1703 __ Or(a2, a1, a0); 1701 __ Or(a2, a1, a0);
1704 __ And(a2, a2, kSmiTagMask); 1702 __ And(a2, a2, kSmiTagMask);
1705 __ Assert(ne, "CompareStub: unexpected smi operands.", 1703 __ Assert(ne, "CompareStub: unexpected smi operands.",
1706 a2, Operand(zero_reg)); 1704 a2, Operand(zero_reg));
1707 } 1705 }
1708 1706
1709 1707
1710 // NOTICE! This code is only reached after a smi-fast-case check, so 1708 // NOTICE! This code is only reached after a smi-fast-case check, so
(...skipping 198 matching lines...)
1909 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); 1907 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1910 // tos_ contains the correct non-zero return value already. 1908 // tos_ contains the correct non-zero return value already.
1911 __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE)); 1909 __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1912 } 1910 }
1913 1911
1914 if (types_.Contains(STRING)) { 1912 if (types_.Contains(STRING)) {
1915 // String value -> false iff empty. 1913 // String value -> false iff empty.
1916 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); 1914 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1917 Label skip; 1915 Label skip;
1918 __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE)); 1916 __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
1917 __ Ret(USE_DELAY_SLOT); // the string length is OK as the return value
1919 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset)); 1918 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1920 __ Ret(); // the string length is OK as the return value
1921 __ bind(&skip); 1919 __ bind(&skip);
1922 } 1920 }
1923 1921
1924 if (types_.Contains(HEAP_NUMBER)) { 1922 if (types_.Contains(HEAP_NUMBER)) {
1925 // Heap number -> false iff +0, -0, or NaN. 1923 // Heap number -> false iff +0, -0, or NaN.
1926 Label not_heap_number; 1924 Label not_heap_number;
1927 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); 1925 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1928 __ Branch(&not_heap_number, ne, map, Operand(at)); 1926 __ Branch(&not_heap_number, ne, map, Operand(at));
1929 Label zero_or_nan, number; 1927 Label zero_or_nan, number;
1930 __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset)); 1928 __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
(...skipping 154 matching lines...)
2085 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, 2083 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2086 Label* non_smi, 2084 Label* non_smi,
2087 Label* slow) { 2085 Label* slow) {
2088 __ JumpIfNotSmi(a0, non_smi); 2086 __ JumpIfNotSmi(a0, non_smi);
2089 2087
2090 // The result of negating zero or the smallest negative smi is not a smi. 2088 // The result of negating zero or the smallest negative smi is not a smi.
2091 __ And(t0, a0, ~0x80000000); 2089 __ And(t0, a0, ~0x80000000);
2092 __ Branch(slow, eq, t0, Operand(zero_reg)); 2090 __ Branch(slow, eq, t0, Operand(zero_reg));
2093 2091
2094 // Return '0 - value'. 2092 // Return '0 - value'.
2095 __ Subu(v0, zero_reg, a0); 2093 __ Ret(USE_DELAY_SLOT);
2096 __ Ret(); 2094 __ subu(v0, zero_reg, a0);
2097 } 2095 }
2098 2096
2099 2097
2100 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, 2098 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2101 Label* non_smi) { 2099 Label* non_smi) {
2102 __ JumpIfNotSmi(a0, non_smi); 2100 __ JumpIfNotSmi(a0, non_smi);
2103 2101
2104 // Flip bits and revert inverted smi-tag. 2102 // Flip bits and revert inverted smi-tag.
2105 __ Neg(v0, a0); 2103 __ Neg(v0, a0);
2106 __ And(v0, v0, ~kSmiTagMask); 2104 __ And(v0, v0, ~kSmiTagMask);
(...skipping 309 matching lines...)
2416 __ mflo(v0); 2414 __ mflo(v0);
2417 __ Ret(ne, v0, Operand(zero_reg)); 2415 __ Ret(ne, v0, Operand(zero_reg));
2418 // We need -0 if we were multiplying a negative number with 0 to get 0. 2416 // We need -0 if we were multiplying a negative number with 0 to get 0.
2419 // We know one of them was zero. 2417 // We know one of them was zero.
2420 __ Addu(scratch2, right, left); 2418 __ Addu(scratch2, right, left);
2421 Label skip; 2419 Label skip;
2422 // ARM uses the 'pl' condition, which is 'ge'. 2420 // ARM uses the 'pl' condition, which is 'ge'.
2423 // Negating it results in 'lt'. 2421 // Negating it results in 'lt'.
2424 __ Branch(&skip, lt, scratch2, Operand(zero_reg)); 2422 __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2425 ASSERT(Smi::FromInt(0) == 0); 2423 ASSERT(Smi::FromInt(0) == 0);
2426 __ mov(v0, zero_reg); 2424 __ Ret(USE_DELAY_SLOT);
2427 __ Ret(); // Return smi 0 if the non-zero one was positive. 2425 __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
2428 __ bind(&skip); 2426 __ bind(&skip);
2429 // We fall through here if we multiplied a negative number with 0, because 2427 // We fall through here if we multiplied a negative number with 0, because
2430 // that would mean we should produce -0. 2428 // that would mean we should produce -0.
2431 } 2429 }
2432 break; 2430 break;
2433 case Token::DIV: { 2431 case Token::DIV: {
2434 Label done; 2432 Label done;
2435 __ SmiUntag(scratch2, right); 2433 __ SmiUntag(scratch2, right);
2436 __ SmiUntag(scratch1, left); 2434 __ SmiUntag(scratch1, left);
2437 __ Div(scratch1, scratch2); 2435 __ Div(scratch1, scratch2);
(...skipping 34 matching lines...)
2472 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg)); 2470 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2473 __ bind(&done); 2471 __ bind(&done);
2474 // Check that the signed result fits in a Smi. 2472 // Check that the signed result fits in a Smi.
2475 __ Addu(scratch1, scratch2, Operand(0x40000000)); 2473 __ Addu(scratch1, scratch2, Operand(0x40000000));
2476 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg)); 2474 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2477 __ SmiTag(v0, scratch2); 2475 __ SmiTag(v0, scratch2);
2478 __ Ret(); 2476 __ Ret();
2479 } 2477 }
2480 break; 2478 break;
2481 case Token::BIT_OR: 2479 case Token::BIT_OR:
2482 __ Or(v0, left, Operand(right)); 2480 __ Ret(USE_DELAY_SLOT);
2483 __ Ret(); 2481 __ or_(v0, left, right);
2484 break; 2482 break;
2485 case Token::BIT_AND: 2483 case Token::BIT_AND:
2486 __ And(v0, left, Operand(right)); 2484 __ Ret(USE_DELAY_SLOT);
2487 __ Ret(); 2485 __ and_(v0, left, right);
2488 break; 2486 break;
2489 case Token::BIT_XOR: 2487 case Token::BIT_XOR:
2490 __ Xor(v0, left, Operand(right)); 2488 __ Ret(USE_DELAY_SLOT);
2491 __ Ret(); 2489 __ xor_(v0, left, right);
2492 break; 2490 break;
2493 case Token::SAR: 2491 case Token::SAR:
2494 // Remove tags from right operand. 2492 // Remove tags from right operand.
2495 __ GetLeastBitsFromSmi(scratch1, right, 5); 2493 __ GetLeastBitsFromSmi(scratch1, right, 5);
2496 __ srav(scratch1, left, scratch1); 2494 __ srav(scratch1, left, scratch1);
2497 // Smi tag result. 2495 // Smi tag result.
2498 __ And(v0, scratch1, Operand(~kSmiTagMask)); 2496 __ And(v0, scratch1, ~kSmiTagMask);
2499 __ Ret(); 2497 __ Ret();
2500 break; 2498 break;
2501 case Token::SHR: 2499 case Token::SHR:
2502 // Remove tags from operands. We can't do this on a 31 bit number 2500 // Remove tags from operands. We can't do this on a 31 bit number
2503 // because then the 0s get shifted into bit 30 instead of bit 31. 2501 // because then the 0s get shifted into bit 30 instead of bit 31.
2504 __ SmiUntag(scratch1, left); 2502 __ SmiUntag(scratch1, left);
2505 __ GetLeastBitsFromSmi(scratch2, right, 5); 2503 __ GetLeastBitsFromSmi(scratch2, right, 5);
2506 __ srlv(v0, scratch1, scratch2); 2504 __ srlv(v0, scratch1, scratch2);
2507 // Unsigned shift is not allowed to produce a negative number, so 2505 // Unsigned shift is not allowed to produce a negative number, so
2508 // check the sign bit and the sign bit after Smi tagging. 2506 // check the sign bit and the sign bit after Smi tagging.
(...skipping 91 matching lines...)
2600 __ div_d(f10, f12, f14); 2598 __ div_d(f10, f12, f14);
2601 break; 2599 break;
2602 default: 2600 default:
2603 UNREACHABLE(); 2601 UNREACHABLE();
2604 } 2602 }
2605 2603
2606 // ARM uses a workaround here because of the unaligned HeapNumber 2604 // ARM uses a workaround here because of the unaligned HeapNumber
2607 // kValueOffset. On MIPS this workaround is built into sdc1 so 2605 // kValueOffset. On MIPS this workaround is built into sdc1 so
2608 // there's no point in generating even more instructions. 2606 // there's no point in generating even more instructions.
2609 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset)); 2607 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
2608 __ Ret(USE_DELAY_SLOT);
2610 __ mov(v0, result); 2609 __ mov(v0, result);
2611 __ Ret();
2612 } else { 2610 } else {
2613 // Call the C function to handle the double operation. 2611 // Call the C function to handle the double operation.
2614 FloatingPointHelper::CallCCodeForDoubleOperation(masm, 2612 FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2615 op_, 2613 op_,
2616 result, 2614 result,
2617 scratch1); 2615 scratch1);
2618 if (FLAG_debug_code) { 2616 if (FLAG_debug_code) {
2619 __ stop("Unreachable code."); 2617 __ stop("Unreachable code.");
2620 } 2618 }
2621 } 2619 }
(...skipping 853 matching lines...)
3475 // heap number, we return the result without updating. 3473 // heap number, we return the result without updating.
3476 __ Pop(cache_entry, a2, a3); 3474 __ Pop(cache_entry, a2, a3);
3477 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); 3475 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3478 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update); 3476 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3479 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); 3477 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3480 3478
3481 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize)); 3479 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3482 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize)); 3480 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3483 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize)); 3481 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3484 3482
3483 __ Ret(USE_DELAY_SLOT);
3485 __ mov(v0, cache_entry); 3484 __ mov(v0, cache_entry);
3486 __ Ret();
3487 3485
3488 __ bind(&invalid_cache); 3486 __ bind(&invalid_cache);
3489 // The cache is invalid. Call runtime which will recreate the 3487 // The cache is invalid. Call runtime which will recreate the
3490 // cache. 3488 // cache.
3491 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); 3489 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3492 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache); 3490 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3493 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset)); 3491 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
3494 { 3492 {
3495 FrameScope scope(masm, StackFrame::INTERNAL); 3493 FrameScope scope(masm, StackFrame::INTERNAL);
3496 __ push(a0); 3494 __ push(a0);
(...skipping 158 matching lines...)
3655 Label not_plus_half; 3653 Label not_plus_half;
3656 3654
3657 // Test for 0.5. 3655 // Test for 0.5.
3658 __ Move(double_scratch, 0.5); 3656 __ Move(double_scratch, 0.5);
3659 __ BranchF(USE_DELAY_SLOT, 3657 __ BranchF(USE_DELAY_SLOT,
3660 &not_plus_half, 3658 &not_plus_half,
3661 NULL, 3659 NULL,
3662 ne, 3660 ne,
3663 double_exponent, 3661 double_exponent,
3664 double_scratch); 3662 double_scratch);
3665 3663 // double_scratch can be overwritten in the delay slot.
3666 // Calculates square root of base. Check for the special case of 3664 // Calculates square root of base. Check for the special case of
3667 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13). 3665 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3668 __ Move(double_scratch, -V8_INFINITY); 3666 __ Move(double_scratch, -V8_INFINITY);
3669 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch); 3667 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3670 __ neg_d(double_result, double_scratch); 3668 __ neg_d(double_result, double_scratch);
3671 3669
3672 // Add +0 to convert -0 to +0. 3670 // Add +0 to convert -0 to +0.
3673 __ add_d(double_scratch, double_base, kDoubleRegZero); 3671 __ add_d(double_scratch, double_base, kDoubleRegZero);
3674 __ sqrt_d(double_result, double_scratch); 3672 __ sqrt_d(double_result, double_scratch);
3675 __ jmp(&done); 3673 __ jmp(&done);
3676 3674
3677 __ bind(&not_plus_half); 3675 __ bind(&not_plus_half);
3678 __ Move(double_scratch, -0.5); 3676 __ Move(double_scratch, -0.5);
3679 __ BranchF(USE_DELAY_SLOT, 3677 __ BranchF(USE_DELAY_SLOT,
3680 &call_runtime, 3678 &call_runtime,
3681 NULL, 3679 NULL,
3682 ne, 3680 ne,
3683 double_exponent, 3681 double_exponent,
3684 double_scratch); 3682 double_scratch);
3685 3683 // double_scratch can be overwritten in the delay slot.
3686 // Calculates square root of base. Check for the special case of 3684 // Calculates square root of base. Check for the special case of
3687 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13). 3685 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3688 __ Move(double_scratch, -V8_INFINITY); 3686 __ Move(double_scratch, -V8_INFINITY);
3689 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch); 3687 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3690 __ Move(double_result, kDoubleRegZero); 3688 __ Move(double_result, kDoubleRegZero);
3691 3689
3692 // Add +0 to convert -0 to +0. 3690 // Add +0 to convert -0 to +0.
3693 __ add_d(double_scratch, double_base, kDoubleRegZero); 3691 __ add_d(double_scratch, double_base, kDoubleRegZero);
3694 __ Move(double_result, 1); 3692 __ Move(double_result, 1);
3695 __ sqrt_d(double_scratch, double_scratch); 3693 __ sqrt_d(double_scratch, double_scratch);
(...skipping 163 matching lines...)
3859 3857
3860 ExternalReference scope_depth = 3858 ExternalReference scope_depth =
3861 ExternalReference::heap_always_allocate_scope_depth(isolate); 3859 ExternalReference::heap_always_allocate_scope_depth(isolate);
3862 if (always_allocate) { 3860 if (always_allocate) {
3863 __ li(a0, Operand(scope_depth)); 3861 __ li(a0, Operand(scope_depth));
3864 __ lw(a1, MemOperand(a0)); 3862 __ lw(a1, MemOperand(a0));
3865 __ Addu(a1, a1, Operand(1)); 3863 __ Addu(a1, a1, Operand(1));
3866 __ sw(a1, MemOperand(a0)); 3864 __ sw(a1, MemOperand(a0));
3867 } 3865 }
3868 3866
3869 // Prepare arguments for C routine: a0 = argc, a1 = argv 3867 // Prepare arguments for C routine.
3868 // a0 = argc
3870 __ mov(a0, s0); 3869 __ mov(a0, s0);
3871 __ mov(a1, s1); 3870 // a1 = argv (set in the delay slot after find_ra below).
3872 3871
3873 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We 3872 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3874 // also need to reserve the 4 argument slots on the stack. 3873 // also need to reserve the 4 argument slots on the stack.
3875 3874
3876 __ AssertStackIsAligned(); 3875 __ AssertStackIsAligned();
3877 3876
3878 __ li(a2, Operand(ExternalReference::isolate_address())); 3877 __ li(a2, Operand(ExternalReference::isolate_address()));
3879 3878
3880 // To let the GC traverse the return address of the exit frames, we need to 3879 // To let the GC traverse the return address of the exit frames, we need to
3881 // know where the return address is. The CEntryStub is unmovable, so 3880 // know where the return address is. The CEntryStub is unmovable, so
3882 // we can store the address on the stack to be able to find it again and 3881 // we can store the address on the stack to be able to find it again and
3883 // we never have to restore it, because it will not change. 3882 // we never have to restore it, because it will not change.
3884 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); 3883 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3885 // This branch-and-link sequence is needed to find the current PC on mips, 3884 // This branch-and-link sequence is needed to find the current PC on mips,
3886 // saved to the ra register. 3885 // saved to the ra register.
3887 // Use masm-> here instead of the double-underscore macro since extra 3886 // Use masm-> here instead of the double-underscore macro since extra
3888 // coverage code can interfere with the proper calculation of ra. 3887 // coverage code can interfere with the proper calculation of ra.
3889 Label find_ra; 3888 Label find_ra;
3890 masm->bal(&find_ra); // bal exposes branch delay slot. 3889 masm->bal(&find_ra); // bal exposes branch delay slot.
3891 masm->nop(); // Branch delay slot nop. 3890 masm->mov(a1, s1);
3892 masm->bind(&find_ra); 3891 masm->bind(&find_ra);
3893 3892
3894 // Adjust the value in ra to point to the correct return location, 2nd 3893 // Adjust the value in ra to point to the correct return location, 2nd
3895 // instruction past the real call into C code (the jalr(t9)), and push it. 3894 // instruction past the real call into C code (the jalr(t9)), and push it.
3896 // This is the return address of the exit frame. 3895 // This is the return address of the exit frame.
3897 const int kNumInstructionsToJump = 6; 3896 const int kNumInstructionsToJump = 5;
3898 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize); 3897 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3899 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame. 3898 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
3900 masm->Subu(sp, sp, kCArgsSlotsSize); 3899 // Stack space reservation moved to the branch delay slot below.
3901 // Stack is still aligned. 3900 // Stack is still aligned.
3902 3901
3903 // Call the C routine. 3902 // Call the C routine.
3904 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC. 3903 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
3905 masm->jalr(t9); 3904 masm->jalr(t9);
3906 masm->nop(); // Branch delay slot nop. 3905 // Set up sp in the delay slot.
3906 masm->addiu(sp, sp, -kCArgsSlotsSize);
3907 // Make sure the stored 'ra' points to this position. 3907 // Make sure the stored 'ra' points to this position.
3908 ASSERT_EQ(kNumInstructionsToJump, 3908 ASSERT_EQ(kNumInstructionsToJump,
3909 masm->InstructionsGeneratedSince(&find_ra)); 3909 masm->InstructionsGeneratedSince(&find_ra));
3910 } 3910 }
3911 3911
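Reviewer note: kNumInstructionsToJump drops from 6 to 5 because the standalone Subu(sp, sp, kCArgsSlotsSize) that used to follow the sw is gone — the argument-slot reservation now sits in the delay slot of jalr(t9). Counting the new code after the find_ra label (each macro below expands to one instruction here, which is exactly what the ASSERT_EQ checks):

    masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);  // 1
    masm->sw(ra, MemOperand(sp));                               // 2
    masm->mov(t9, s2);                                          // 3
    masm->jalr(t9);                                             // 4
    masm->addiu(sp, sp, -kCArgsSlotsSize);                      // 5 (jalr delay slot)
    // ra now equals find_ra + 5 instructions: the first instruction after the
    // call sequence, i.e. the return address stored for the exit frame.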
3912 // Restore stack (remove arg slots).
3913 __ Addu(sp, sp, kCArgsSlotsSize);
3914
3915 if (always_allocate) { 3912 if (always_allocate) {
3916 // It's okay to clobber a2 and a3 here. v0 & v1 contain result. 3913 // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3917 __ li(a2, Operand(scope_depth)); 3914 __ li(a2, Operand(scope_depth));
3918 __ lw(a3, MemOperand(a2)); 3915 __ lw(a3, MemOperand(a2));
3919 __ Subu(a3, a3, Operand(1)); 3916 __ Subu(a3, a3, Operand(1));
3920 __ sw(a3, MemOperand(a2)); 3917 __ sw(a3, MemOperand(a2));
3921 } 3918 }
3922 3919
3923 // Check for failure result. 3920 // Check for failure result.
3924 Label failure_returned; 3921 Label failure_returned;
3925 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); 3922 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3926 __ addiu(a2, v0, 1); 3923 __ addiu(a2, v0, 1);
3927 __ andi(t0, a2, kFailureTagMask); 3924 __ andi(t0, a2, kFailureTagMask);
3928 __ Branch(&failure_returned, eq, t0, Operand(zero_reg)); 3925 __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
3926 // Restore stack (remove arg slots) in branch delay slot.
3927 __ addiu(sp, sp, kCArgsSlotsSize);
3928
3929 3929
3930 // Exit C frame and return. 3930 // Exit C frame and return.
3931 // v0:v1: result 3931 // v0:v1: result
3932 // sp: stack pointer 3932 // sp: stack pointer
3933 // fp: frame pointer 3933 // fp: frame pointer
3934 __ LeaveExitFrame(save_doubles_, s0); 3934 __ LeaveExitFrame(save_doubles_, s0, true);
3935 __ Ret();
3936 3935
3937 // Check if we should retry or throw exception. 3936 // Check if we should retry or throw exception.
3938 Label retry; 3937 Label retry;
3939 __ bind(&failure_returned); 3938 __ bind(&failure_returned);
3940 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); 3939 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3941 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize); 3940 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
3942 __ Branch(&retry, eq, t0, Operand(zero_reg)); 3941 __ Branch(&retry, eq, t0, Operand(zero_reg));
3943 3942
3944 // Special handling of out of memory exceptions. 3943 // Special handling of out of memory exceptions.
3945 Failure* out_of_memory = Failure::OutOfMemoryException(); 3944 Failure* out_of_memory = Failure::OutOfMemoryException();
3946 __ Branch(throw_out_of_memory_exception, eq, 3945 __ Branch(USE_DELAY_SLOT, throw_out_of_memory_exception, eq,
3947 v0, Operand(reinterpret_cast<int32_t>(out_of_memory))); 3946 v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3947 // If we throw the OOM exception, the value of a3 doesn't matter.
3948 // Any instruction can be in the delay slot that's not a jump.
3948 3949
3949 // Retrieve the pending exception and clear the variable. 3950 // Retrieve the pending exception and clear the variable.
3950 __ li(a3, Operand(isolate->factory()->the_hole_value())); 3951 __ li(a3, Operand(isolate->factory()->the_hole_value()));
3951 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 3952 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3952 isolate))); 3953 isolate)));
3953 __ lw(v0, MemOperand(t0)); 3954 __ lw(v0, MemOperand(t0));
3954 __ sw(a3, MemOperand(t0)); 3955 __ sw(a3, MemOperand(t0));
3955 3956
3956 // Special handling of termination exceptions which are uncatchable 3957 // Special handling of termination exceptions which are uncatchable
3957 // by javascript code. 3958 // by javascript code.
3958 __ Branch(throw_termination_exception, eq, 3959 __ Branch(throw_termination_exception, eq,
3959 v0, Operand(isolate->factory()->termination_exception())); 3960 v0, Operand(isolate->factory()->termination_exception()));
3960 3961
3961 // Handle normal exception. 3962 // Handle normal exception.
3962 __ jmp(throw_normal_exception); 3963 __ jmp(throw_normal_exception);
3963 3964
3964 __ bind(&retry); 3965 __ bind(&retry);
3965 // Last failure (v0) will be moved to (a0) as the parameter when retrying. 3966 // Last failure (v0) will be moved to (a0) as the parameter when retrying.
3966 } 3967 }
3967 3968
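Note on the hunks above: the clean-up of the C argument slots after the failure check and the load of the-hole value after the out-of-memory branch are moved into branch delay slots instead of letting the assembler pad those branches with nops. The sketch below restates the pattern in isolation, using only MacroAssembler calls that appear verbatim in the hunk; the wrapper name is invented for illustration. On MIPS the instruction immediately after a branch executes whether or not the branch is taken, so whatever is placed in the slot must be a single non-jump instruction that is needed (or at least harmless) on both paths.

#define __ masm->

// Illustration only: the USE_DELAY_SLOT branch form used in GenerateCore.
static void EmitFailureCheckSketch(MacroAssembler* masm,
                                   Label* failure_returned) {
  __ addiu(a2, v0, 1);
  __ andi(t0, a2, kFailureTagMask);
  // USE_DELAY_SLOT tells the macro assembler that the caller supplies the
  // delay-slot instruction, so no nop is emitted after the branch.
  __ Branch(USE_DELAY_SLOT, failure_returned, eq, t0, Operand(zero_reg));
  // Removing the arg slots is needed on both the taken and the fall-through
  // path, so it can safely execute unconditionally in the delay slot.
  __ addiu(sp, sp, kCArgsSlotsSize);
}

#undef __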
3968 3969
3969 void CEntryStub::Generate(MacroAssembler* masm) { 3970 void CEntryStub::Generate(MacroAssembler* masm) {
3970 // Called from JavaScript; parameters are on stack as if calling JS function 3971 // Called from JavaScript; parameters are on stack as if calling JS function
3971 // a0: number of arguments including receiver 3972 // s0: number of arguments including receiver
3972 // a1: pointer to builtin function 3973 // s1: size of arguments excluding receiver
3974 // s2: pointer to builtin function
3973 // fp: frame pointer (restored after C call) 3975 // fp: frame pointer (restored after C call)
3974 // sp: stack pointer (restored as callee's sp after C call) 3976 // sp: stack pointer (restored as callee's sp after C call)
3975 // cp: current context (C callee-saved) 3977 // cp: current context (C callee-saved)
3976 3978
3977 // NOTE: Invocations of builtins may return failure objects 3979 // NOTE: Invocations of builtins may return failure objects
3978 // instead of a proper result. The builtin entry handles 3980 // instead of a proper result. The builtin entry handles
3979 // this by performing a garbage collection and retrying the 3981 // this by performing a garbage collection and retrying the
3980 // builtin once. 3982 // builtin once.
3981 3983
3984 // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
3985 // The reason for this is that these arguments would need to be saved anyway,
3986 // so it is faster to set them up in the callee-saved registers directly.
3987 // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
3988
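The NOTE above documents the new calling convention for CEntryStub: the argument count, the argument size, and the builtin address now arrive in the callee-saved registers s0-s2, set up by the caller via MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction. Those helpers are not shown in this file, so the call-site sketch below rests on assumptions about their shape: PrepareCEntryArgs(argc) is assumed to load s0 (argument count) and s1 (argument size excluding the receiver), and PrepareCEntryFunction(ref) to load s2 with the builtin's entry point.

#define __ masm->

// Hypothetical call site, for illustration only; the helper signatures are
// assumed, not taken from this patch set.
static void CallBuiltinViaCEntrySketch(MacroAssembler* masm, int argc,
                                       const ExternalReference& builtin) {
  __ PrepareCEntryArgs(argc);         // Assumed: s0 = argc, s1 = arg size.
  __ PrepareCEntryFunction(builtin);  // Assumed: s2 = builtin entry point.
  CEntryStub stub(1);
  __ CallStub(&stub);
}

#undef __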
3982 // Compute the argv pointer in a callee-saved register. 3989 // Compute the argv pointer in a callee-saved register.
3983 __ sll(s1, a0, kPointerSizeLog2);
3984 __ Addu(s1, sp, s1); 3990 __ Addu(s1, sp, s1);
3985 __ Subu(s1, s1, Operand(kPointerSize));
3986 3991
3987 // Enter the exit frame that transitions from JavaScript to C++. 3992 // Enter the exit frame that transitions from JavaScript to C++.
3988 FrameScope scope(masm, StackFrame::MANUAL); 3993 FrameScope scope(masm, StackFrame::MANUAL);
3989 __ EnterExitFrame(save_doubles_); 3994 __ EnterExitFrame(save_doubles_);
3990 3995
3991 // Set up argc and the builtin function in callee-saved registers.
3992 __ mov(s0, a0);
3993 __ mov(s2, a1);
3994
3995 // s0: number of arguments (C callee-saved) 3996 // s0: number of arguments (C callee-saved)
3996 // s1: pointer to first argument (C callee-saved) 3997 // s1: pointer to first argument (C callee-saved)
3997 // s2: pointer to builtin function (C callee-saved) 3998 // s2: pointer to builtin function (C callee-saved)
3998 3999
3999 Label throw_normal_exception; 4000 Label throw_normal_exception;
4000 Label throw_termination_exception; 4001 Label throw_termination_exception;
4001 Label throw_out_of_memory_exception; 4002 Label throw_out_of_memory_exception;
4002 4003
4003 // Call into the runtime system. 4004 // Call into the runtime system.
4004 GenerateCore(masm, 4005 GenerateCore(masm,
(...skipping 681 matching lines...)
4686 __ lw(t2, MemOperand(t0, 0)); 4687 __ lw(t2, MemOperand(t0, 0));
4687 __ sll(t6, t5, 1); 4688 __ sll(t6, t5, 1);
4688 __ Addu(t1, a3, Operand(t6)); 4689 __ Addu(t1, a3, Operand(t6));
4689 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize)); 4690 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
4690 __ Addu(t5, t5, Operand(Smi::FromInt(1))); 4691 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4691 4692
4692 __ bind(&arguments_test); 4693 __ bind(&arguments_test);
4693 __ Branch(&arguments_loop, lt, t5, Operand(a2)); 4694 __ Branch(&arguments_loop, lt, t5, Operand(a2));
4694 4695
4695 // Return and remove the on-stack parameters. 4696 // Return and remove the on-stack parameters.
4696 __ Addu(sp, sp, Operand(3 * kPointerSize)); 4697 __ DropAndRet(3);
4697 __ Ret();
4698 4698
4699 // Do the runtime call to allocate the arguments object. 4699 // Do the runtime call to allocate the arguments object.
4700 // a2 = argument count (tagged) 4700 // a2 = argument count (tagged)
4701 __ bind(&runtime); 4701 __ bind(&runtime);
4702 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. 4702 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
4703 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); 4703 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4704 } 4704 }
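Several stubs in this file (here and again in GenerateNewStrict, RegExpExecStub, RegExpConstructResult, and StringCompareStub below) replace the two-step epilogue "Addu(sp, sp, Operand(n * kPointerSize)); Ret();" with a single DropAndRet(n). DropAndRet lives in the MIPS macro assembler, not in this file; the expansion sketched below is the presumed one, shown only to make the saving concrete: the stack adjustment rides in the delay slot of the return jump, so the old Addu / jr / nop sequence becomes two instructions.

#define __ masm->

// Presumed expansion of DropAndRet(count); illustration only.
static void DropAndRetSketch(MacroAssembler* masm, int count) {
  __ Ret(USE_DELAY_SLOT);
  __ addiu(sp, sp, count * kPointerSize);  // In the delay slot of the jr ra.
}

#undef __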
4705 4705
4706 4706
4707 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { 4707 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
(...skipping 84 matching lines...)
4792 __ Addu(a2, a2, Operand(-kPointerSize)); 4792 __ Addu(a2, a2, Operand(-kPointerSize));
4793 __ lw(a3, MemOperand(a2)); 4793 __ lw(a3, MemOperand(a2));
4794 // Post-increment t0 with kPointerSize on each iteration. 4794 // Post-increment t0 with kPointerSize on each iteration.
4795 __ sw(a3, MemOperand(t0)); 4795 __ sw(a3, MemOperand(t0));
4796 __ Addu(t0, t0, Operand(kPointerSize)); 4796 __ Addu(t0, t0, Operand(kPointerSize));
4797 __ Subu(a1, a1, Operand(1)); 4797 __ Subu(a1, a1, Operand(1));
4798 __ Branch(&loop, ne, a1, Operand(zero_reg)); 4798 __ Branch(&loop, ne, a1, Operand(zero_reg));
4799 4799
4800 // Return and remove the on-stack parameters. 4800 // Return and remove the on-stack parameters.
4801 __ bind(&done); 4801 __ bind(&done);
4802 __ Addu(sp, sp, Operand(3 * kPointerSize)); 4802 __ DropAndRet(3);
4803 __ Ret();
4804 4803
4805 // Do the runtime call to allocate the arguments object. 4804 // Do the runtime call to allocate the arguments object.
4806 __ bind(&runtime); 4805 __ bind(&runtime);
4807 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1); 4806 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
4808 } 4807 }
4809 4808
4810 4809
4811 void RegExpExecStub::Generate(MacroAssembler* masm) { 4810 void RegExpExecStub::Generate(MacroAssembler* masm) {
4812 // Just jump directly to runtime if native RegExp is not selected at compile 4811 // Just jump directly to runtime if native RegExp is not selected at compile
4813 // time or if regexp entry in generated code is turned off by a runtime switch or 4812 // time or if regexp entry in generated code is turned off by a runtime switch or
(...skipping 328 matching lines...)
5142 __ Branch(&termination_exception, eq, v0, Operand(a0)); 5141 __ Branch(&termination_exception, eq, v0, Operand(a0));
5143 5142
5144 __ Throw(v0); 5143 __ Throw(v0);
5145 5144
5146 __ bind(&termination_exception); 5145 __ bind(&termination_exception);
5147 __ ThrowUncatchable(v0); 5146 __ ThrowUncatchable(v0);
5148 5147
5149 __ bind(&failure); 5148 __ bind(&failure);
5150 // For failure and exception return null. 5149 // For failure and exception return null.
5151 __ li(v0, Operand(isolate->factory()->null_value())); 5150 __ li(v0, Operand(isolate->factory()->null_value()));
5152 __ Addu(sp, sp, Operand(4 * kPointerSize)); 5151 __ DropAndRet(4);
5153 __ Ret();
5154 5152
5155 // Process the result from the native regexp code. 5153 // Process the result from the native regexp code.
5156 __ bind(&success); 5154 __ bind(&success);
5157 __ lw(a1, 5155 __ lw(a1,
5158 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); 5156 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
5159 // Calculate number of capture registers (number_of_captures + 1) * 2. 5157 // Calculate number of capture registers (number_of_captures + 1) * 2.
5160 STATIC_ASSERT(kSmiTag == 0); 5158 STATIC_ASSERT(kSmiTag == 0);
5161 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); 5159 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5162 __ Addu(a1, a1, Operand(2)); // a1 was a smi. 5160 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
5163 5161
(...skipping 40 matching lines...)
5204 __ bind(&next_capture); 5202 __ bind(&next_capture);
5205 __ Subu(a1, a1, Operand(1)); 5203 __ Subu(a1, a1, Operand(1));
5206 __ Branch(&done, lt, a1, Operand(zero_reg)); 5204 __ Branch(&done, lt, a1, Operand(zero_reg));
5207 // Read the value from the static offsets vector buffer. 5205 // Read the value from the static offsets vector buffer.
5208 __ lw(a3, MemOperand(a2, 0)); 5206 __ lw(a3, MemOperand(a2, 0));
5209 __ addiu(a2, a2, kPointerSize); 5207 __ addiu(a2, a2, kPointerSize);
5210 // Store the smi value in the last match info. 5208 // Store the smi value in the last match info.
5211 __ sll(a3, a3, kSmiTagSize); // Convert to Smi. 5209 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
5212 __ sw(a3, MemOperand(a0, 0)); 5210 __ sw(a3, MemOperand(a0, 0));
5213 __ Branch(&next_capture, USE_DELAY_SLOT); 5211 __ Branch(&next_capture, USE_DELAY_SLOT);
5214 __ addiu(a0, a0, kPointerSize); // In branch delay slot. 5212 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
5215 5213
5216 __ bind(&done); 5214 __ bind(&done);
5217 5215
5218 // Return last match info. 5216 // Return last match info.
5219 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset)); 5217 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
5220 __ Addu(sp, sp, Operand(4 * kPointerSize)); 5218 __ DropAndRet(4);
5221 __ Ret();
5222 5219
5223 // External string. Short external strings have already been ruled out. 5220 // External string. Short external strings have already been ruled out.
5224 // a0: scratch 5221 // a0: scratch
5225 __ bind(&external_string); 5222 __ bind(&external_string);
5226 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); 5223 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5227 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); 5224 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5228 if (FLAG_debug_code) { 5225 if (FLAG_debug_code) {
5229 // Assert that we do not have a cons or slice (indirect strings) here. 5226 // Assert that we do not have a cons or slice (indirect strings) here.
5230 // Sequential strings have already been ruled out. 5227 // Sequential strings have already been ruled out.
5231 __ And(at, a0, Operand(kIsIndirectStringMask)); 5228 __ And(at, a0, Operand(kIsIndirectStringMask));
(...skipping 91 matching lines...)
5323 Label loop; 5320 Label loop;
5324 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes. 5321 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
5325 __ addu(t1, t1, a3); // Point past last element to store. 5322 __ addu(t1, t1, a3); // Point past last element to store.
5326 __ bind(&loop); 5323 __ bind(&loop);
5327 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 is past end of elements. 5324 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 is past end of elements.
5328 __ sw(a2, MemOperand(a3)); 5325 __ sw(a2, MemOperand(a3));
5329 __ Branch(&loop, USE_DELAY_SLOT); 5326 __ Branch(&loop, USE_DELAY_SLOT);
5330 __ addiu(a3, a3, kPointerSize); // In branch delay slot. 5327 __ addiu(a3, a3, kPointerSize); // In branch delay slot.
5331 5328
5332 __ bind(&done); 5329 __ bind(&done);
5333 __ Addu(sp, sp, Operand(3 * kPointerSize)); 5330 __ DropAndRet(3);
5334 __ Ret();
5335 5331
5336 __ bind(&slowcase); 5332 __ bind(&slowcase);
5337 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); 5333 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
5338 } 5334 }
5339 5335
5340 5336
5341 static void GenerateRecordCallTarget(MacroAssembler* masm) { 5337 static void GenerateRecordCallTarget(MacroAssembler* masm) {
5342 // Cache the called function in a global property cell. Cache states 5338 // Cache the called function in a global property cell. Cache states
5343 // are uninitialized, monomorphic (indicated by a JSFunction), and 5339 // are uninitialized, monomorphic (indicated by a JSFunction), and
5344 // megamorphic. 5340 // megamorphic.
(...skipping 784 matching lines...)
6129 // v0: original string 6125 // v0: original string
6130 // a1: instance type 6126 // a1: instance type
6131 // a2: length 6127 // a2: length
6132 // a3: from index (untagged) 6128 // a3: from index (untagged)
6133 Label underlying_unpacked, sliced_string, seq_or_external_string; 6129 Label underlying_unpacked, sliced_string, seq_or_external_string;
6134 // If the string is not indirect, it can only be sequential or external. 6130 // If the string is not indirect, it can only be sequential or external.
6135 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); 6131 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
6136 STATIC_ASSERT(kIsIndirectStringMask != 0); 6132 STATIC_ASSERT(kIsIndirectStringMask != 0);
6137 __ And(t0, a1, Operand(kIsIndirectStringMask)); 6133 __ And(t0, a1, Operand(kIsIndirectStringMask));
6138 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg)); 6134 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
6139 6135 // t0 is used as a scratch register and can be overwritten in either case.
6140 __ And(t0, a1, Operand(kSlicedNotConsMask)); 6136 __ And(t0, a1, Operand(kSlicedNotConsMask));
6141 __ Branch(&sliced_string, ne, t0, Operand(zero_reg)); 6137 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
6142 // Cons string. Check whether it is flat, then fetch first part. 6138 // Cons string. Check whether it is flat, then fetch first part.
6143 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset)); 6139 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
6144 __ LoadRoot(t0, Heap::kEmptyStringRootIndex); 6140 __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
6145 __ Branch(&runtime, ne, t1, Operand(t0)); 6141 __ Branch(&runtime, ne, t1, Operand(t0));
6146 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset)); 6142 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
6147 // Update instance type. 6143 // Update instance type.
6148 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset)); 6144 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
6149 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); 6145 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
(...skipping 252 matching lines...)
6402 // sp[4]: left string 6398 // sp[4]: left string
6403 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left. 6399 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
6404 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right. 6400 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
6405 6401
6406 Label not_same; 6402 Label not_same;
6407 __ Branch(&not_same, ne, a0, Operand(a1)); 6403 __ Branch(&not_same, ne, a0, Operand(a1));
6408 STATIC_ASSERT(EQUAL == 0); 6404 STATIC_ASSERT(EQUAL == 0);
6409 STATIC_ASSERT(kSmiTag == 0); 6405 STATIC_ASSERT(kSmiTag == 0);
6410 __ li(v0, Operand(Smi::FromInt(EQUAL))); 6406 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6411 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2); 6407 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
6412 __ Addu(sp, sp, Operand(2 * kPointerSize)); 6408 __ DropAndRet(2);
6413 __ Ret();
6414 6409
6415 __ bind(&not_same); 6410 __ bind(&not_same);
6416 6411
6417 // Check that both objects are sequential ASCII strings. 6412 // Check that both objects are sequential ASCII strings.
6418 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime); 6413 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6419 6414
6420 // Compare flat ASCII strings natively. Remove arguments from stack first. 6415 // Compare flat ASCII strings natively. Remove arguments from stack first.
6421 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3); 6416 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6422 __ Addu(sp, sp, Operand(2 * kPointerSize)); 6417 __ Addu(sp, sp, Operand(2 * kPointerSize));
6423 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1); 6418 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
(...skipping 384 matching lines...)
6808 6803
6809 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. 6804 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
6810 Label fpu_eq, fpu_lt; 6805 Label fpu_eq, fpu_lt;
6811 // Test if equal, and also handle the unordered/NaN case. 6806 // Test if equal, and also handle the unordered/NaN case.
6812 __ BranchF(&fpu_eq, &unordered, eq, f0, f2); 6807 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
6813 6808
6814 // Test if less (unordered case is already handled). 6809 // Test if less (unordered case is already handled).
6815 __ BranchF(&fpu_lt, NULL, lt, f0, f2); 6810 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
6816 6811
6817 // Otherwise it's greater, so just fall thru, and return. 6812 // Otherwise it's greater, so just fall thru, and return.
6818 __ Ret(USE_DELAY_SLOT); 6813 __ li(v0, Operand(GREATER));
6819 __ li(v0, Operand(GREATER)); // In delay slot. 6814 __ Ret();
6820 6815
6821 __ bind(&fpu_eq); 6816 __ bind(&fpu_eq);
6822 __ Ret(USE_DELAY_SLOT); 6817 __ li(v0, Operand(EQUAL));
6823 __ li(v0, Operand(EQUAL)); // In delay slot. 6818 __ Ret();
6824 6819
6825 __ bind(&fpu_lt); 6820 __ bind(&fpu_lt);
6826 __ Ret(USE_DELAY_SLOT); 6821 __ li(v0, Operand(LESS));
6827 __ li(v0, Operand(LESS)); // In delay slot. 6822 __ Ret();
6828 } 6823 }
6829 6824
6830 __ bind(&unordered); 6825 __ bind(&unordered);
6831 6826
6832 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0); 6827 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6833 __ bind(&generic_stub); 6828 __ bind(&generic_stub);
6834 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); 6829 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6835 6830
6836 __ bind(&maybe_undefined1); 6831 __ bind(&maybe_undefined1);
6837 if (Token::IsOrderedRelationalCompareOp(op_)) { 6832 if (Token::IsOrderedRelationalCompareOp(op_)) {
(...skipping 79 matching lines...)
6917 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); 6912 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6918 STATIC_ASSERT(kNotStringTag != 0); 6913 STATIC_ASSERT(kNotStringTag != 0);
6919 __ Or(tmp3, tmp1, tmp2); 6914 __ Or(tmp3, tmp1, tmp2);
6920 __ And(tmp5, tmp3, Operand(kIsNotStringMask)); 6915 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
6921 __ Branch(&miss, ne, tmp5, Operand(zero_reg)); 6916 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
6922 6917
6923 // Fast check for identical strings. 6918 // Fast check for identical strings.
6924 Label left_ne_right; 6919 Label left_ne_right;
6925 STATIC_ASSERT(EQUAL == 0); 6920 STATIC_ASSERT(EQUAL == 0);
6926 STATIC_ASSERT(kSmiTag == 0); 6921 STATIC_ASSERT(kSmiTag == 0);
6927 __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT); 6922 __ Branch(&left_ne_right, ne, left, Operand(right));
6923 __ Ret(USE_DELAY_SLOT);
6928 __ mov(v0, zero_reg); // In the delay slot. 6924 __ mov(v0, zero_reg); // In the delay slot.
6929 __ Ret();
6930 __ bind(&left_ne_right); 6925 __ bind(&left_ne_right);
6931 6926
6932 // Handle not identical strings. 6927 // Handle not identical strings.
6933 6928
6934 // Check that both strings are symbols. If they are, we're done 6929 // Check that both strings are symbols. If they are, we're done
6935 // because we already know they are not identical. 6930 // because we already know they are not identical.
6936 if (equality) { 6931 if (equality) {
6937 ASSERT(GetCondition() == eq); 6932 ASSERT(GetCondition() == eq);
6938 STATIC_ASSERT(kSymbolTag != 0); 6933 STATIC_ASSERT(kSymbolTag != 0);
6939 __ And(tmp3, tmp1, Operand(tmp2)); 6934 __ And(tmp3, tmp1, Operand(tmp2));
6940 __ And(tmp5, tmp3, Operand(kIsSymbolMask)); 6935 __ And(tmp5, tmp3, Operand(kIsSymbolMask));
6941 Label is_symbol; 6936 Label is_symbol;
6942 __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT); 6937 __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
6943 __ mov(v0, a0); // In the delay slot.
6944 // Make sure a0 is non-zero. At this point input operands are 6938 // Make sure a0 is non-zero. At this point input operands are
6945 // guaranteed to be non-zero. 6939 // guaranteed to be non-zero.
6946 ASSERT(right.is(a0)); 6940 ASSERT(right.is(a0));
6947 __ Ret(); 6941 __ Ret(USE_DELAY_SLOT);
6942 __ mov(v0, a0); // In the delay slot.
6948 __ bind(&is_symbol); 6943 __ bind(&is_symbol);
6949 } 6944 }
6950 6945
6951 // Check that both strings are sequential ASCII. 6946 // Check that both strings are sequential ASCII.
6952 Label runtime; 6947 Label runtime;
6953 __ JumpIfBothInstanceTypesAreNotSequentialAscii( 6948 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
6954 tmp1, tmp2, tmp3, tmp4, &runtime); 6949 tmp1, tmp2, tmp3, tmp4, &runtime);
6955 6950
6956 // Compare flat ASCII strings. Returns when done. 6951 // Compare flat ASCII strings. Returns when done.
6957 if (equality) { 6952 if (equality) {
(...skipping 23 matching lines...)
6981 Label miss; 6976 Label miss;
6982 __ And(a2, a1, Operand(a0)); 6977 __ And(a2, a1, Operand(a0));
6983 __ JumpIfSmi(a2, &miss); 6978 __ JumpIfSmi(a2, &miss);
6984 6979
6985 __ GetObjectType(a0, a2, a2); 6980 __ GetObjectType(a0, a2, a2);
6986 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); 6981 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6987 __ GetObjectType(a1, a2, a2); 6982 __ GetObjectType(a1, a2, a2);
6988 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); 6983 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6989 6984
6990 ASSERT(GetCondition() == eq); 6985 ASSERT(GetCondition() == eq);
6991 __ Subu(v0, a0, Operand(a1)); 6986 __ Ret(USE_DELAY_SLOT);
6992 __ Ret(); 6987 __ subu(v0, a0, a1);
6993 6988
6994 __ bind(&miss); 6989 __ bind(&miss);
6995 GenerateMiss(masm); 6990 GenerateMiss(masm);
6996 } 6991 }
6997 6992
6998 6993
6999 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { 6994 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
7000 Label miss; 6995 Label miss;
7001 __ And(a2, a1, a0); 6996 __ And(a2, a1, a0);
7002 __ JumpIfSmi(a2, &miss); 6997 __ JumpIfSmi(a2, &miss);
(...skipping 12 matching lines...)
7015 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { 7010 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
7016 { 7011 {
7017 // Call the runtime system in a fresh internal frame. 7012 // Call the runtime system in a fresh internal frame.
7018 ExternalReference miss = 7013 ExternalReference miss =
7019 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); 7014 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
7020 FrameScope scope(masm, StackFrame::INTERNAL); 7015 FrameScope scope(masm, StackFrame::INTERNAL);
7021 __ Push(a1, a0); 7016 __ Push(a1, a0);
7022 __ push(ra); 7017 __ push(ra);
7023 __ Push(a1, a0); 7018 __ Push(a1, a0);
7024 __ li(t0, Operand(Smi::FromInt(op_))); 7019 __ li(t0, Operand(Smi::FromInt(op_)));
7025 __ push(t0); 7020 __ addiu(sp, sp, -kPointerSize);
7026 __ CallExternalReference(miss, 3); 7021 __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
7022 __ sw(t0, MemOperand(sp)); // In the delay slot.
7027 // Compute the entry point of the rewritten stub. 7023 // Compute the entry point of the rewritten stub.
7028 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); 7024 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
7029 // Restore registers. 7025 // Restore registers.
7030 __ Pop(a1, a0, ra); 7026 __ Pop(a1, a0, ra);
7031 } 7027 }
7032 __ Jump(a2); 7028 __ Jump(a2);
7033 } 7029 }
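In GenerateMiss above, push(t0) is split into its two halves so that the store half can occupy the delay slot of the runtime call: a push on MIPS is the pair "addiu(sp, sp, -kPointerSize); sw(rx, MemOperand(sp));". The CallExternalReference overload taking a BranchDelaySlot argument appears only in this hunk, so its availability is assumed to come from the macro-assembler side of the patch. A sketch of the pattern, with an invented wrapper name and parameterized arguments:

#define __ masm->

// Illustration of splitting a push so its store fills a call's delay slot.
static void PushInDelaySlotSketch(MacroAssembler* masm,
                                  const ExternalReference& target,
                                  int num_arguments, int value) {
  __ li(t0, Operand(Smi::FromInt(value)));
  __ addiu(sp, sp, -kPointerSize);           // First half of push(t0).
  __ CallExternalReference(target, num_arguments, USE_DELAY_SLOT);
  __ sw(t0, MemOperand(sp));                 // Second half, in the delay slot.
}

#undef __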
7034 7030
7035 7031
7036 void DirectCEntryStub::Generate(MacroAssembler* masm) { 7032 void DirectCEntryStub::Generate(MacroAssembler* masm) {
(...skipping 289 matching lines...)
7326 __ And(result, entry_key, Operand(kIsSymbolMask)); 7322 __ And(result, entry_key, Operand(kIsSymbolMask));
7327 __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg)); 7323 __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
7328 } 7324 }
7329 } 7325 }
7330 7326
7331 __ bind(&maybe_in_dictionary); 7327 __ bind(&maybe_in_dictionary);
7332 // If we are doing negative lookup then probing failure should be 7328 // If we are doing negative lookup then probing failure should be
7333 // treated as a lookup success. For positive lookup probing failure 7329 // treated as a lookup success. For positive lookup probing failure
7334 // should be treated as lookup failure. 7330 // should be treated as lookup failure.
7335 if (mode_ == POSITIVE_LOOKUP) { 7331 if (mode_ == POSITIVE_LOOKUP) {
7332 __ Ret(USE_DELAY_SLOT);
7336 __ mov(result, zero_reg); 7333 __ mov(result, zero_reg);
7337 __ Ret();
7338 } 7334 }
7339 7335
7340 __ bind(&in_dictionary); 7336 __ bind(&in_dictionary);
7337 __ Ret(USE_DELAY_SLOT);
7341 __ li(result, 1); 7338 __ li(result, 1);
7342 __ Ret();
7343 7339
7344 __ bind(&not_in_dictionary); 7340 __ bind(&not_in_dictionary);
7341 __ Ret(USE_DELAY_SLOT);
7345 __ mov(result, zero_reg); 7342 __ mov(result, zero_reg);
7346 __ Ret();
7347 } 7343 }
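The three returns above follow the pattern this patch applies throughout the file: the single instruction that puts the result into v0 is emitted after Ret(USE_DELAY_SLOT), so it lands in the delay slot of the jr ra instead of a nop. (The related change in ICCompareStub::GenerateObjects earlier swaps the Subu macro for the bare subu for the same reason: with a register operand both assemble to the same single instruction, and the lower-case mnemonic makes it explicit that exactly one instruction fills the slot.) A minimal restatement, with an invented helper name:

#define __ masm->

// Illustration of the return-value-in-delay-slot pattern.
static void ReturnResultSketch(MacroAssembler* masm, Register result) {
  // Old shape: mov(v0, result); Ret();  -- the Ret is padded with a nop.
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, result);  // Executes in the delay slot of the return.
}

#undef __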
7348 7344
7349 7345
7350 struct AheadOfTimeWriteBarrierStubList { 7346 struct AheadOfTimeWriteBarrierStubList {
7351 Register object, value, address; 7347 Register object, value, address;
7352 RememberedSetAction action; 7348 RememberedSetAction action;
7353 }; 7349 };
7354 7350
7355 7351
7356 struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { 7352 struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
(...skipping 325 matching lines...)
7682 __ Ret(USE_DELAY_SLOT); 7678 __ Ret(USE_DELAY_SLOT);
7683 __ mov(v0, a0); 7679 __ mov(v0, a0);
7684 } 7680 }
7685 7681
7686 7682
7687 #undef __ 7683 #undef __
7688 7684
7689 } } // namespace v8::internal 7685 } } // namespace v8::internal
7690 7686
7691 #endif // V8_TARGET_ARCH_MIPS 7687 #endif // V8_TARGET_ARCH_MIPS