| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| (...skipping 139 matching lines...) |
| 151 v0, | 151 v0, |
| 152 a1, | 152 a1, |
| 153 a2, | 153 a2, |
| 154 &gc, | 154 &gc, |
| 155 TAG_OBJECT); | 155 TAG_OBJECT); |
| 156 | 156 |
| 157 // Load the function from the stack. | 157 // Load the function from the stack. |
| 158 __ lw(a3, MemOperand(sp, 0)); | 158 __ lw(a3, MemOperand(sp, 0)); |
| 159 | 159 |
| 160 // Set up the object header. | 160 // Set up the object header. |
| 161 __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex); | 161 __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex); |
| 162 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); | |
| 163 __ li(a2, Operand(Smi::FromInt(length))); | 162 __ li(a2, Operand(Smi::FromInt(length))); |
| 164 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset)); | 163 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset)); |
| | 164 __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); |
| 165 | 165 |
| 166 // Set up the fixed slots. | 166 // Set up the fixed slots, copy the global object from the previous context. |
| | 167 __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 167 __ li(a1, Operand(Smi::FromInt(0))); | 168 __ li(a1, Operand(Smi::FromInt(0))); |
| 168 __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX))); | 169 __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX))); |
| 169 __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 170 __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
| 170 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX))); | 171 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX))); |
| 171 | 172 __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 172 // Copy the global object from the previous context. | |
| 173 __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | |
| 174 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX))); | |
| 175 | 173 |
| 176 // Initialize the rest of the slots to undefined. | 174 // Initialize the rest of the slots to undefined. |
| 177 __ LoadRoot(a1, Heap::kUndefinedValueRootIndex); | 175 __ LoadRoot(a1, Heap::kUndefinedValueRootIndex); |
| 178 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { | 176 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { |
| 179 __ sw(a1, MemOperand(v0, Context::SlotOffset(i))); | 177 __ sw(a1, MemOperand(v0, Context::SlotOffset(i))); |
| 180 } | 178 } |
| 181 | 179 |
| 182 // Remove the on-stack argument and return. | 180 // Remove the on-stack argument and return. |
| 183 __ mov(cp, v0); | 181 __ mov(cp, v0); |
| 184 __ Pop(); | 182 __ Pop(); |
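The stores above address slots with `Context::SlotOffset(i)` relative to the tagged pointer in v0 (the allocation used TAG_OBJECT). A minimal sketch of how such an offset is plausibly computed, assuming V8's 32-bit layout with a FixedArray-style header and a low heap-object tag bit; the constants are my assumptions, not taken from this patch:

```cpp
// Sketch only: contexts are FixedArray-shaped, so a slot's operand offset is
// the header size plus the slot index, minus the tag bit carried by tagged
// heap pointers such as the one in v0.
const int kPointerSize = 4;           // 32-bit MIPS
const int kHeapObjectTag = 1;         // assumed low tag bit on heap pointers
const int kFixedArrayHeaderSize = 8;  // assumed: map word + length word

int ContextSlotOffset(int index) {
  return kFixedArrayHeaderSize + index * kPointerSize - kHeapObjectTag;
}
```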
| (...skipping 37 matching lines...) |
| 222 __ JumpIfNotSmi(a3, &after_sentinel); | 220 __ JumpIfNotSmi(a3, &after_sentinel); |
| 223 if (FLAG_debug_code) { | 221 if (FLAG_debug_code) { |
| 224 const char* message = "Expected 0 as a Smi sentinel"; | 222 const char* message = "Expected 0 as a Smi sentinel"; |
| 225 __ Assert(eq, message, a3, Operand(zero_reg)); | 223 __ Assert(eq, message, a3, Operand(zero_reg)); |
| 226 } | 224 } |
| 227 __ lw(a3, GlobalObjectOperand()); | 225 __ lw(a3, GlobalObjectOperand()); |
| 228 __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset)); | 226 __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset)); |
| 229 __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX)); | 227 __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX)); |
| 230 __ bind(&after_sentinel); | 228 __ bind(&after_sentinel); |
| 231 | 229 |
| 232 // Set up the fixed slots. | 230 // Set up the fixed slots, copy the global object from the previous context. |
| | 231 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX)); |
| 233 __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX)); | 232 __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX)); |
| 234 __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX)); | 233 __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX)); |
| 235 __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX)); | 234 __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX)); |
| 236 | 235 __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX)); |
| 237 // Copy the global object from the previous context. | |
| 238 __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX)); | |
| 239 __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX)); | |
| 240 | 236 |
| 241 // Initialize the rest of the slots to the hole value. | 237 // Initialize the rest of the slots to the hole value. |
| 242 __ LoadRoot(a1, Heap::kTheHoleValueRootIndex); | 238 __ LoadRoot(a1, Heap::kTheHoleValueRootIndex); |
| 243 for (int i = 0; i < slots_; i++) { | 239 for (int i = 0; i < slots_; i++) { |
| 244 __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS)); | 240 __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS)); |
| 245 } | 241 } |
| 246 | 242 |
| 247 // Remove the on-stack argument and return. | 243 // Remove the on-stack argument and return. |
| 248 __ mov(cp, v0); | 244 __ mov(cp, v0); |
| 249 __ Addu(sp, sp, Operand(2 * kPointerSize)); | 245 __ Addu(sp, sp, Operand(2 * kPointerSize)); |
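Both context stubs above juggle smis (`Smi::FromInt(length)`, the zero sentinel the debug-mode Assert checks for). On 32-bit targets a smi is the integer shifted left one bit with a zero tag bit, which is why untagging is a single arithmetic shift. A minimal sketch of that encoding, assuming kSmiTag == 0 and kSmiTagSize + kSmiShiftSize == 1 as the STATIC_ASSERTs later in this file state:

```cpp
#include <cstdint>

// 32-bit smi encoding sketch: the value lives in the upper 31 bits, tag bit 0.
inline int32_t SmiTag(int32_t value) { return value << 1; }
inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }      // arithmetic shift
inline bool    IsSmi(int32_t tagged) { return (tagged & 1) == 0; }
```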
| (...skipping 335 matching lines...) |
| 585 Register scratch2, | 581 Register scratch2, |
| 586 Label* not_number) { | 582 Label* not_number) { |
| 587 if (FLAG_debug_code) { | 583 if (FLAG_debug_code) { |
| 588 __ AbortIfNotRootValue(heap_number_map, | 584 __ AbortIfNotRootValue(heap_number_map, |
| 589 Heap::kHeapNumberMapRootIndex, | 585 Heap::kHeapNumberMapRootIndex, |
| 590 "HeapNumberMap register clobbered."); | 586 "HeapNumberMap register clobbered."); |
| 591 } | 587 } |
| 592 | 588 |
| 593 Label is_smi, done; | 589 Label is_smi, done; |
| 594 | 590 |
| 595 __ JumpIfSmi(object, &is_smi); | 591 // Smi-check |
| | 592 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); |
| | 593 // Heap number check |
| 596 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 594 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
| 597 | 595 |
| 598 // Handle loading a double from a heap number. | 596 // Handle loading a double from a heap number. |
| 599 if (CpuFeatures::IsSupported(FPU) && | 597 if (CpuFeatures::IsSupported(FPU) && |
| 600 destination == kFPURegisters) { | 598 destination == kFPURegisters) { |
| 601 CpuFeatures::Scope scope(FPU); | 599 CpuFeatures::Scope scope(FPU); |
| 602 // Load the double from tagged HeapNumber to double register. | 600 // Load the double from tagged HeapNumber to double register. |
| 603 | 601 |
| 604 // ARM uses a workaround here because of the unaligned HeapNumber | 602 // ARM uses a workaround here because of the unaligned HeapNumber |
| 605 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no | 603 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no |
| 606 // point in generating even more instructions. | 604 // point in generating even more instructions. |
| 607 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); | 605 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 608 } else { | 606 } else { |
| 609 ASSERT(destination == kCoreRegisters); | 607 ASSERT(destination == kCoreRegisters); |
| 610 // Load the double from heap number to dst1 and dst2 in double format. | 608 // Load the double from heap number to dst1 and dst2 in double format. |
| 611 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset)); | 609 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 612 __ lw(dst2, FieldMemOperand(object, | 610 __ lw(dst2, FieldMemOperand(object, |
| 613 HeapNumber::kValueOffset + kPointerSize)); | 611 HeapNumber::kValueOffset + kPointerSize)); |
| 614 } | 612 } |
| 615 __ Branch(&done); | 613 __ Branch(&done); |
| 616 | 614 |
| 617 // Handle loading a double from a smi. | 615 // Handle loading a double from a smi. |
| 618 __ bind(&is_smi); | 616 __ bind(&is_smi); |
| 619 if (CpuFeatures::IsSupported(FPU)) { | 617 if (CpuFeatures::IsSupported(FPU)) { |
| 620 CpuFeatures::Scope scope(FPU); | 618 CpuFeatures::Scope scope(FPU); |
| 621 // Convert smi to double using FPU instructions. | 619 // Convert smi to double using FPU instructions. |
| 622 __ SmiUntag(scratch1, object); | |
| 623 __ mtc1(scratch1, dst); | 620 __ mtc1(scratch1, dst); |
| 624 __ cvt_d_w(dst, dst); | 621 __ cvt_d_w(dst, dst); |
| 625 if (destination == kCoreRegisters) { | 622 if (destination == kCoreRegisters) { |
| 626 // Load the converted smi to dst1 and dst2 in double format. | 623 // Load the converted smi to dst1 and dst2 in double format. |
| 627 __ Move(dst1, dst2, dst); | 624 __ Move(dst1, dst2, dst); |
| 628 } | 625 } |
| 629 } else { | 626 } else { |
| 630 ASSERT(destination == kCoreRegisters); | 627 ASSERT(destination == kCoreRegisters); |
| 631 // Write smi to dst1 and dst2 double format. | 628 // Write smi to dst1 and dst2 double format. |
| 632 __ mov(scratch1, object); | 629 __ mov(scratch1, object); |
| (...skipping 14 matching lines...) |
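The recurring change in this patch replaces a `JumpIfSmi` followed by `SmiUntag` with a single `UntagAndJumpIfSmi(dst, object, label)`. The old SubString code further down shows the pattern it packages: a smi-tag branch with the untagging shift placed in the MIPS branch delay slot, so dst is written on both paths but only meaningful on the smi path. A behavioral sketch in plain C++ (my illustration; the real macro lives in the MIPS macro assembler):

```cpp
#include <cstdint>

// Returning true stands in for taking the smi_case branch. dst is written
// unconditionally, mirroring a shift executed in the branch delay slot;
// call sites in this file only read it on the smi path.
inline bool UntagAndJumpIfSmi(int32_t& dst, int32_t src) {
  dst = src >> 1;         // SmiUntag in the delay slot
  return (src & 1) == 0;  // tag bit clear means src was a smi
}
```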
| 647 Register scratch1, | 644 Register scratch1, |
| 648 Register scratch2, | 645 Register scratch2, |
| 649 Register scratch3, | 646 Register scratch3, |
| 650 FPURegister double_scratch, | 647 FPURegister double_scratch, |
| 651 Label* not_number) { | 648 Label* not_number) { |
| 652 if (FLAG_debug_code) { | 649 if (FLAG_debug_code) { |
| 653 __ AbortIfNotRootValue(heap_number_map, | 650 __ AbortIfNotRootValue(heap_number_map, |
| 654 Heap::kHeapNumberMapRootIndex, | 651 Heap::kHeapNumberMapRootIndex, |
| 655 "HeapNumberMap register clobbered."); | 652 "HeapNumberMap register clobbered."); |
| 656 } | 653 } |
| 657 Label is_smi; | |
| 658 Label done; | 654 Label done; |
| 659 Label not_in_int32_range; | 655 Label not_in_int32_range; |
| 660 | 656 |
| 661 __ JumpIfSmi(object, &is_smi); | 657 __ UntagAndJumpIfSmi(dst, object, &done); |
| 662 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); | 658 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); |
| 663 __ Branch(not_number, ne, scratch1, Operand(heap_number_map)); | 659 __ Branch(not_number, ne, scratch1, Operand(heap_number_map)); |
| 664 __ ConvertToInt32(object, | 660 __ ConvertToInt32(object, |
| 665 dst, | 661 dst, |
| 666 scratch1, | 662 scratch1, |
| 667 scratch2, | 663 scratch2, |
| 668 double_scratch, | 664 double_scratch, |
| 669 &not_in_int32_range); | 665 &not_in_int32_range); |
| 670 __ jmp(&done); | 666 __ jmp(&done); |
| 671 | 667 |
| 672 __ bind(&not_in_int32_range); | 668 __ bind(&not_in_int32_range); |
| 673 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | 669 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
| 674 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | 670 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
| 675 | 671 |
| 676 __ EmitOutOfInt32RangeTruncate(dst, | 672 __ EmitOutOfInt32RangeTruncate(dst, |
| 677 scratch1, | 673 scratch1, |
| 678 scratch2, | 674 scratch2, |
| 679 scratch3); | 675 scratch3); |
| 680 | 676 |
| 681 __ jmp(&done); | |
| 682 | |
| 683 __ bind(&is_smi); | |
| 684 __ SmiUntag(dst, object); | |
| 685 __ bind(&done); | 677 __ bind(&done); |
| 686 } | 678 } |
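When the heap number is outside int32 range, the code above loads the raw exponent and mantissa words and hands them to `EmitOutOfInt32RangeTruncate`, which produces ECMAScript's ToInt32 result: truncate toward zero, then wrap modulo 2^32. A scalar sketch of that semantics (my illustration, not the emitted bit manipulation):

```cpp
#include <cmath>
#include <cstdint>

// ECMAScript ToInt32 for a double, sketched with library math.
int32_t ToInt32(double value) {
  if (!std::isfinite(value)) return 0;     // NaN and infinities map to 0
  double t = std::trunc(value);            // round toward zero
  double m = std::fmod(t, 4294967296.0);   // modulo 2^32, keeps t's sign
  if (m < 0) m += 4294967296.0;            // shift into [0, 2^32)
  return static_cast<int32_t>(static_cast<uint32_t>(m));  // wrap to int32
}
```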
| 687 | 679 |
| 688 | 680 |
| 689 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, | 681 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, |
| 690 Register int_scratch, | 682 Register int_scratch, |
| 691 Destination destination, | 683 Destination destination, |
| 692 FPURegister double_dst, | 684 FPURegister double_dst, |
| 693 Register dst1, | 685 Register dst1, |
| 694 Register dst2, | 686 Register dst2, |
| (...skipping 161 matching lines...) |
| 856 DoubleRegister double_scratch, | 848 DoubleRegister double_scratch, |
| 857 Label* not_int32) { | 849 Label* not_int32) { |
| 858 ASSERT(!dst.is(object)); | 850 ASSERT(!dst.is(object)); |
| 859 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); | 851 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); |
| 860 ASSERT(!scratch1.is(scratch2) && | 852 ASSERT(!scratch1.is(scratch2) && |
| 861 !scratch1.is(scratch3) && | 853 !scratch1.is(scratch3) && |
| 862 !scratch2.is(scratch3)); | 854 !scratch2.is(scratch3)); |
| 863 | 855 |
| 864 Label done; | 856 Label done; |
| 865 | 857 |
| 866 // Untag the object into the destination register. | 858 __ UntagAndJumpIfSmi(dst, object, &done); |
| 867 __ SmiUntag(dst, object); | |
| 868 // Just return if the object is a smi. | |
| 869 __ JumpIfSmi(object, &done); | |
| 870 | 859 |
| 871 if (FLAG_debug_code) { | 860 if (FLAG_debug_code) { |
| 872 __ AbortIfNotRootValue(heap_number_map, | 861 __ AbortIfNotRootValue(heap_number_map, |
| 873 Heap::kHeapNumberMapRootIndex, | 862 Heap::kHeapNumberMapRootIndex, |
| 874 "HeapNumberMap register clobbered."); | 863 "HeapNumberMap register clobbered."); |
| 875 } | 864 } |
| 876 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 865 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
| 877 | 866 |
| 878 // Object is a heap number. | 867 // Object is a heap number. |
| 879 // Convert the floating point value to a 32-bit integer. | 868 // Convert the floating point value to a 32-bit integer. |
| (...skipping 2718 matching lines...) |
| 3598 const Register heapnumbermap = t1; | 3587 const Register heapnumbermap = t1; |
| 3599 const Register heapnumber = v0; | 3588 const Register heapnumber = v0; |
| 3600 const DoubleRegister double_base = f2; | 3589 const DoubleRegister double_base = f2; |
| 3601 const DoubleRegister double_exponent = f4; | 3590 const DoubleRegister double_exponent = f4; |
| 3602 const DoubleRegister double_result = f0; | 3591 const DoubleRegister double_result = f0; |
| 3603 const DoubleRegister double_scratch = f6; | 3592 const DoubleRegister double_scratch = f6; |
| 3604 const FPURegister single_scratch = f8; | 3593 const FPURegister single_scratch = f8; |
| 3605 const Register scratch = t5; | 3594 const Register scratch = t5; |
| 3606 const Register scratch2 = t3; | 3595 const Register scratch2 = t3; |
| 3607 | 3596 |
| 3608 Label call_runtime, done, exponent_not_smi, int_exponent; | 3597 Label call_runtime, done, int_exponent; |
| 3609 if (exponent_type_ == ON_STACK) { | 3598 if (exponent_type_ == ON_STACK) { |
| 3610 Label base_is_smi, unpack_exponent; | 3599 Label base_is_smi, unpack_exponent; |
| 3611 // The exponent and base are supplied as arguments on the stack. | 3600 // The exponent and base are supplied as arguments on the stack. |
| 3612 // This can only happen if the stub is called from non-optimized code. | 3601 // This can only happen if the stub is called from non-optimized code. |
| 3613 // Load input parameters from stack to double registers. | 3602 // Load input parameters from stack to double registers. |
| 3614 __ lw(base, MemOperand(sp, 1 * kPointerSize)); | 3603 __ lw(base, MemOperand(sp, 1 * kPointerSize)); |
| 3615 __ lw(exponent, MemOperand(sp, 0 * kPointerSize)); | 3604 __ lw(exponent, MemOperand(sp, 0 * kPointerSize)); |
| 3616 | 3605 |
| 3617 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); | 3606 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); |
| 3618 | 3607 |
| 3619 __ JumpIfSmi(base, &base_is_smi); | 3608 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi); |
| 3620 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset)); | 3609 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset)); |
| 3621 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); | 3610 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); |
| 3622 | 3611 |
| 3623 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); | 3612 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); |
| 3624 __ jmp(&unpack_exponent); | 3613 __ jmp(&unpack_exponent); |
| 3625 | 3614 |
| 3626 __ bind(&base_is_smi); | 3615 __ bind(&base_is_smi); |
| 3627 __ SmiUntag(base); | 3616 __ mtc1(scratch, single_scratch); |
| 3628 __ mtc1(base, single_scratch); | |
| 3629 __ cvt_d_w(double_base, single_scratch); | 3617 __ cvt_d_w(double_base, single_scratch); |
| 3630 __ bind(&unpack_exponent); | 3618 __ bind(&unpack_exponent); |
| 3631 | 3619 |
| 3632 __ JumpIfNotSmi(exponent, &exponent_not_smi); | 3620 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); |
| 3633 __ SmiUntag(exponent); | |
| 3634 __ jmp(&int_exponent); | |
| 3635 | 3621 |
| 3636 __ bind(&exponent_not_smi); | |
| 3637 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); | 3622 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); |
| 3638 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); | 3623 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); |
| 3639 __ ldc1(double_exponent, | 3624 __ ldc1(double_exponent, |
| 3640 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 3625 FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
| 3641 } else if (exponent_type_ == TAGGED) { | 3626 } else if (exponent_type_ == TAGGED) { |
| 3642 // Base is already in double_base. | 3627 // Base is already in double_base. |
| 3643 __ JumpIfNotSmi(exponent, &exponent_not_smi); | 3628 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); |
| 3644 __ SmiUntag(exponent); | |
| 3645 __ jmp(&int_exponent); | |
| 3646 | 3629 |
| 3647 __ bind(&exponent_not_smi); | |
| 3648 __ ldc1(double_exponent, | 3630 __ ldc1(double_exponent, |
| 3649 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 3631 FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
| 3650 } | 3632 } |
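For orientation, the branches above cover the stub's three input conventions; this summary is my reading of the code, with the enum names taken from it:

```cpp
// MathPowStub input conventions handled above (sketch).
enum ExponentType {
  ON_STACK,  // base and exponent arrive tagged on the stack (unoptimized code)
  TAGGED,    // base already in double_base; exponent tagged in a register
  INTEGER    // base in double_base; exponent already an untagged integer
};
```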
| 3651 | 3633 |
| 3652 if (exponent_type_ != INTEGER) { | 3634 if (exponent_type_ != INTEGER) { |
| 3653 Label int_exponent_convert; | 3635 Label int_exponent_convert; |
| 3654 // Detect integer exponents stored as double. | 3636 // Detect integer exponents stored as double. |
| 3655 __ EmitFPUTruncate(kRoundToMinusInf, | 3637 __ EmitFPUTruncate(kRoundToMinusInf, |
| 3656 single_scratch, | 3638 single_scratch, |
| 3657 double_exponent, | 3639 double_exponent, |
| (...skipping 59 matching lines...) |
| 3717 __ SetCallCDoubleArguments(double_base, double_exponent); | 3699 __ SetCallCDoubleArguments(double_base, double_exponent); |
| 3718 __ CallCFunction( | 3700 __ CallCFunction( |
| 3719 ExternalReference::power_double_double_function(masm->isolate()), | 3701 ExternalReference::power_double_double_function(masm->isolate()), |
| 3720 0, 2); | 3702 0, 2); |
| 3721 } | 3703 } |
| 3722 __ pop(ra); | 3704 __ pop(ra); |
| 3723 __ GetCFunctionDoubleResult(double_result); | 3705 __ GetCFunctionDoubleResult(double_result); |
| 3724 __ jmp(&done); | 3706 __ jmp(&done); |
| 3725 | 3707 |
| 3726 __ bind(&int_exponent_convert); | 3708 __ bind(&int_exponent_convert); |
| 3727 __ mfc1(exponent, single_scratch); | 3709 __ mfc1(scratch, single_scratch); |
| 3728 } | 3710 } |
| 3729 | 3711 |
| 3730 // Calculate power with integer exponent. | 3712 // Calculate power with integer exponent. |
| 3731 __ bind(&int_exponent); | 3713 __ bind(&int_exponent); |
| 3732 | 3714 |
| 3733 __ mov(scratch, exponent); // Back up exponent. | 3715 // Get two copies of exponent in the registers scratch and exponent. |
| | 3716 if (exponent_type_ == INTEGER) { |
| | 3717 __ mov(scratch, exponent); |
| | 3718 } else { |
| | 3719 // Exponent has previously been stored into scratch as untagged integer. |
| | 3720 __ mov(exponent, scratch); |
| | 3721 } |
| | 3722 |
| 3734 __ mov_d(double_scratch, double_base); // Back up base. | 3723 __ mov_d(double_scratch, double_base); // Back up base. |
| 3735 __ Move(double_result, 1.0); | 3724 __ Move(double_result, 1.0); |
| 3736 | 3725 |
| 3737 // Get absolute value of exponent. | 3726 // Get absolute value of exponent. |
| 3738 Label positive_exponent; | 3727 Label positive_exponent; |
| 3739 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg)); | 3728 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg)); |
| 3740 __ Subu(scratch, zero_reg, scratch); | 3729 __ Subu(scratch, zero_reg, scratch); |
| 3741 __ bind(&positive_exponent); | 3730 __ bind(&positive_exponent); |
| 3742 | 3731 |
| 3743 Label while_true, no_carry, loop_end; | 3732 Label while_true, no_carry, loop_end; |
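The loop elided below (labels `while_true` and `loop_end`) runs exponentiation by squaring on the absolute value taken just above. A plain C++ equivalent of the algorithm, as a sketch; the negative-exponent fixup is my assumption about the elided tail:

```cpp
// Square-and-multiply over the exponent's bits, mirroring the stub's
// integer-exponent path.
double PowIntExponent(double base, int exponent) {
  int e = exponent < 0 ? -exponent : exponent;  // absolute value, as above
  double result = 1.0;
  double b = base;
  while (e != 0) {
    if (e & 1) result *= b;  // low bit set: fold the current power in
    b *= b;                  // square for the next bit
    e >>= 1;
  }
  // Assumed: the stub returns the reciprocal for negative exponents.
  return exponent < 0 ? 1.0 / result : result;
}
```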
| (...skipping 1547 matching lines...) |
| 5291 __ Addu(a3, v0, Operand(JSRegExpResult::kSize)); | 5280 __ Addu(a3, v0, Operand(JSRegExpResult::kSize)); |
| 5292 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array())); | 5281 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array())); |
| 5293 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset)); | 5282 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset)); |
| 5294 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); | 5283 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); |
| 5295 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX)); | 5284 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX)); |
| 5296 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset)); | 5285 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset)); |
| 5297 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); | 5286 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); |
| 5298 | 5287 |
| 5299 // Set input, index and length fields from arguments. | 5288 // Set input, index and length fields from arguments. |
| 5300 __ lw(a1, MemOperand(sp, kPointerSize * 0)); | 5289 __ lw(a1, MemOperand(sp, kPointerSize * 0)); |
| | 5290 __ lw(a2, MemOperand(sp, kPointerSize * 1)); |
| | 5291 __ lw(t2, MemOperand(sp, kPointerSize * 2)); |
| 5301 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset)); | 5292 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset)); |
| 5302 __ lw(a1, MemOperand(sp, kPointerSize * 1)); | 5293 __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset)); |
| 5303 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset)); | 5294 __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset)); |
| 5304 __ lw(a1, MemOperand(sp, kPointerSize * 2)); | |
| 5305 __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset)); | |
| 5306 | 5295 |
| 5307 // Fill out the elements FixedArray. | 5296 // Fill out the elements FixedArray. |
| 5308 // v0: JSArray, tagged. | 5297 // v0: JSArray, tagged. |
| 5309 // a3: FixedArray, tagged. | 5298 // a3: FixedArray, tagged. |
| 5310 // t1: Number of elements in array, untagged. | 5299 // t1: Number of elements in array, untagged. |
| 5311 | 5300 |
| 5312 // Set map. | 5301 // Set map. |
| 5313 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map())); | 5302 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map())); |
| 5314 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset)); | 5303 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset)); |
| 5315 // Set FixedArray length. | 5304 // Set FixedArray length. |
| (...skipping 746 matching lines...) |
| 6062 static const int kStringOffset = 2 * kPointerSize; | 6051 static const int kStringOffset = 2 * kPointerSize; |
| 6063 | 6052 |
| 6064 __ lw(a2, MemOperand(sp, kToOffset)); | 6053 __ lw(a2, MemOperand(sp, kToOffset)); |
| 6065 __ lw(a3, MemOperand(sp, kFromOffset)); | 6054 __ lw(a3, MemOperand(sp, kFromOffset)); |
| 6066 STATIC_ASSERT(kFromOffset == kToOffset + 4); | 6055 STATIC_ASSERT(kFromOffset == kToOffset + 4); |
| 6067 STATIC_ASSERT(kSmiTag == 0); | 6056 STATIC_ASSERT(kSmiTag == 0); |
| 6068 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 6057 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
| 6069 | 6058 |
| 6070 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is | 6059 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is |
| 6071 // safe in this case. | 6060 // safe in this case. |
| 6072 __ JumpIfSmi(a2, &runtime, at, USE_DELAY_SLOT); | 6061 __ UntagAndJumpIfSmi(a2, a2, &runtime); |
| 6073 __ SmiUntag(a2); | 6062 __ UntagAndJumpIfSmi(a3, a3, &runtime); |
| 6074 __ JumpIfSmi(a3, &runtime, at, USE_DELAY_SLOT); | |
| 6075 __ SmiUntag(a3); | |
| 6076 | 6063 |
| 6077 // Both a2 and a3 are untagged integers. | 6064 // Both a2 and a3 are untagged integers. |
| 6078 | 6065 |
| 6079 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0. | 6066 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0. |
| 6080 | 6067 |
| 6081 __ subu(a2, t5, a3); | 6068 __ subu(a2, t5, a3); |
| 6082 __ Branch(&runtime, gt, a3, Operand(t5)); // Fail if from > to. | 6069 __ Branch(&runtime, gt, a3, Operand(t5)); // Fail if from > to. |
| 6083 | 6070 |
| 6084 // Make sure first argument is a string. | 6071 // Make sure first argument is a string. |
| 6085 __ lw(v0, MemOperand(sp, kStringOffset)); | 6072 __ lw(v0, MemOperand(sp, kStringOffset)); |
| (...skipping 63 matching lines...) |
| 6149 __ LoadRoot(t0, Heap::kEmptyStringRootIndex); | 6136 __ LoadRoot(t0, Heap::kEmptyStringRootIndex); |
| 6150 __ Branch(&runtime, ne, t1, Operand(t0)); | 6137 __ Branch(&runtime, ne, t1, Operand(t0)); |
| 6151 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset)); | 6138 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset)); |
| 6152 // Update instance type. | 6139 // Update instance type. |
| 6153 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset)); | 6140 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset)); |
| 6154 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); | 6141 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); |
| 6155 __ jmp(&underlying_unpacked); | 6142 __ jmp(&underlying_unpacked); |
| 6156 | 6143 |
| 6157 __ bind(&sliced_string); | 6144 __ bind(&sliced_string); |
| 6158 // Sliced string. Fetch parent and correct start index by offset. | 6145 // Sliced string. Fetch parent and correct start index by offset. |
| 6159 __ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset)); | 6146 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset)); |
| 6160 __ sra(t1, t1, 1); | |
| 6161 __ Addu(a3, a3, t1); | |
| 6162 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset)); | 6147 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset)); |
| | 6148 __ sra(t0, t0, 1); // Add offset to index. |
| | 6149 __ Addu(a3, a3, t0); |
| 6163 // Update instance type. | 6150 // Update instance type. |
| 6164 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset)); | 6151 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset)); |
| 6165 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); | 6152 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); |
| 6166 __ jmp(&underlying_unpacked); | 6153 __ jmp(&underlying_unpacked); |
| 6167 | 6154 |
| 6168 __ bind(&seq_or_external_string); | 6155 __ bind(&seq_or_external_string); |
| 6169 // Sequential or external string. Just move string to the expected register. | 6156 // Sequential or external string. Just move string to the expected register. |
| 6170 __ mov(t1, v0); | 6157 __ mov(t1, v0); |
| 6171 | 6158 |
| 6172 __ bind(&underlying_unpacked); | 6159 __ bind(&underlying_unpacked); |
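The three branches above establish one invariant at `underlying_unpacked`: t1 holds an underlying sequential or external string and a3 holds the adjusted start index, with a cons string contributing its first component and a sliced string contributing its parent plus its smi-tagged offset (untagged by the `sra` by 1). A self-contained sketch of that reduction, using hypothetical types rather than V8's real String hierarchy:

```cpp
#include <utility>

enum Kind { kSeqOrExternal, kFlatCons, kSliced };
struct Str {
  Kind kind;
  Str* child;  // first component (cons) or parent (sliced); hypothetical
  int offset;  // slice start, already untagged here
};

// Reduce any string shape to (underlying string, adjusted start index).
std::pair<Str*, int> Unpack(Str* s, int from) {
  switch (s->kind) {
    case kFlatCons: return {s->child, from};              // second is empty
    case kSliced:   return {s->child, from + s->offset};  // add slice offset
    default:        return {s, from};                     // already flat
  }
}
```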
| (...skipping 1476 matching lines...) |
| 7649 __ Ret(USE_DELAY_SLOT); | 7636 __ Ret(USE_DELAY_SLOT); |
| 7650 __ mov(v0, a0); | 7637 __ mov(v0, a0); |
| 7651 } | 7638 } |
| 7652 | 7639 |
| 7653 | 7640 |
| 7654 #undef __ | 7641 #undef __ |
| 7655 | 7642 |
| 7656 } } // namespace v8::internal | 7643 } } // namespace v8::internal |
| 7657 | 7644 |
| 7658 #endif // V8_TARGET_ARCH_MIPS | 7645 #endif // V8_TARGET_ARCH_MIPS |