| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 2470 matching lines...) |
| 2481 case Token::MUL: | 2481 case Token::MUL: |
| 2482 __ mul_d(f10, f12, f14); | 2482 __ mul_d(f10, f12, f14); |
| 2483 break; | 2483 break; |
| 2484 case Token::DIV: | 2484 case Token::DIV: |
| 2485 __ div_d(f10, f12, f14); | 2485 __ div_d(f10, f12, f14); |
| 2486 break; | 2486 break; |
| 2487 default: | 2487 default: |
| 2488 UNREACHABLE(); | 2488 UNREACHABLE(); |
| 2489 } | 2489 } |
| 2490 | 2490 |
| 2491 if (result_type_ <= BinaryOpIC::INT32) { | 2491 if (op_ != Token::DIV) { |
| 2492 // These operations produce an integer result. |
| 2493 // Try to return a smi if we can. |
| 2494 // Otherwise return a heap number if allowed, or jump to type |
| 2495 // transition. |
| 2496 |
| 2492 Register except_flag = scratch2; | 2497 Register except_flag = scratch2; |
| 2493 const FPURoundingMode kRoundingMode = op_ == Token::DIV ? | 2498 __ EmitFPUTruncate(kRoundToZero, |
| 2494 kRoundToMinusInf : kRoundToZero; | |
| 2495 const CheckForInexactConversion kConversion = op_ == Token::DIV ? | |
| 2496 kCheckForInexactConversion : kDontCheckForInexactConversion; | |
| 2497 __ EmitFPUTruncate(kRoundingMode, | |
| 2498 scratch1, | 2499 scratch1, |
| 2499 f10, | 2500 f10, |
| 2500 at, | 2501 at, |
| 2501 f16, | 2502 f16, |
| 2502 except_flag, | 2503 except_flag); |
| 2503 kConversion); | 2504 |
| 2504 // If except_flag != 0, result does not fit in a 32-bit integer. | 2505 if (result_type_ <= BinaryOpIC::INT32) { |
| 2505 __ Branch(&transition, ne, except_flag, Operand(zero_reg)); | 2506 // If except_flag != 0, result does not fit in a 32-bit integer. |
| 2506 // Try to tag the result as a Smi, return heap number on overflow. | 2507 __ Branch(&transition, ne, except_flag, Operand(zero_reg)); |
| 2507 __ SmiTagCheckOverflow(scratch1, scratch1, scratch2); | 2508 } |
| 2509 |
| 2510 // Check if the result fits in a smi. |
| 2511 __ Addu(scratch2, scratch1, Operand(0x40000000)); |
| 2512 // If not try to return a heap number. |
| 2508 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg)); | 2513 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg)); |
| 2509 // Check for minus zero, transition in that case (because we need | 2514 // Check for minus zero. Return heap number for minus zero if |
| 2510 // to return a heap number). | 2515 // double results are allowed; otherwise transition. |
| 2511 Label not_zero; | 2516 Label not_zero; |
| 2512 ASSERT(kSmiTag == 0); | |
| 2513 __ Branch(¬_zero, ne, scratch1, Operand(zero_reg)); | 2517 __ Branch(¬_zero, ne, scratch1, Operand(zero_reg)); |
| 2514 __ mfc1(scratch2, f11); | 2518 __ mfc1(scratch2, f11); |
| 2515 __ And(scratch2, scratch2, HeapNumber::kSignMask); | 2519 __ And(scratch2, scratch2, HeapNumber::kSignMask); |
| 2516 __ Branch(&transition, ne, scratch2, Operand(zero_reg)); | 2520 __ Branch(result_type_ <= BinaryOpIC::INT32 ? &transition |
| 2521 : &return_heap_number, |
| 2522 ne, |
| 2523 scratch2, |
| 2524 Operand(zero_reg)); |
| 2517 __ bind(¬_zero); | 2525 __ bind(¬_zero); |
| 2518 | 2526 |
| 2527 // Tag the result and return. |
| 2519 __ Ret(USE_DELAY_SLOT); | 2528 __ Ret(USE_DELAY_SLOT); |
| 2520 __ mov(v0, scratch1); | 2529 __ SmiTag(v0, scratch1); // SmiTag emits one instruction. |
| 2530 } else { |
| 2531 // DIV just falls through to allocating a heap number. |
| 2521 } | 2532 } |
| 2522 | 2533 |
| 2523 __ bind(&return_heap_number); | 2534 __ bind(&return_heap_number); |
| 2524 // Return a heap number, or fall through to type transition or runtime | 2535 // Return a heap number, or fall through to type transition or runtime |
| 2525 // call if we can't. | 2536 // call if we can't. |
| 2526 // We are using FPU registers so s0 is available. | 2537 // We are using FPU registers so s0 is available. |
| 2527 heap_number_result = s0; | 2538 heap_number_result = s0; |
| 2528 BinaryOpStub_GenerateHeapResultAllocation(masm, | 2539 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 2529 heap_number_result, | 2540 heap_number_result, |
| 2530 heap_number_map, | 2541 heap_number_map, |
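For reference, the trick behind the new smi-range check above: on 32-bit targets a smi holds a 31-bit signed integer, so an integer result fits iff it lies in [-2^30, 2^30). The `Addu(scratch2, scratch1, Operand(0x40000000))` biases the value by 2^30, which maps exactly the out-of-range values onto negative 32-bit results, and the `lt` branch then routes those to `&return_heap_number`. Minus zero gets its own check because a smi cannot represent -0: the stub reads the sign bit of the double's high word (f11) and either transitions or returns a heap number. A minimal standalone sketch of the range test, in plain C++ and assuming ordinary two's-complement wrap-around (not V8 code):

```cpp
#include <cstdint>
#include <cstdio>

// True iff `value` fits in a 31-bit smi, i.e. lies in [-2^30, 2^30).
// Mirrors the stub's Addu + branch-on-sign: biasing by 2^30 wraps every
// out-of-range value into the negative half of the 32-bit space.
bool FitsInSmi(int32_t value) {
  uint32_t biased = static_cast<uint32_t>(value) + 0x40000000u;
  return static_cast<int32_t>(biased) >= 0;  // sign bit clear <=> in range
}

int main() {
  printf("%d\n", FitsInSmi(0x3FFFFFFF));   // 1: largest smi, 2^30 - 1
  printf("%d\n", FitsInSmi(0x40000000));   // 0: one past the top
  printf("%d\n", FitsInSmi(-0x40000000));  // 1: smallest smi, -2^30
  printf("%d\n", FitsInSmi(-0x40000001));  // 0: one below the bottom
  return 0;
}
```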
| (...skipping 4363 matching lines...) |
| 6894 // Dereference the address and check for this. | 6905 // Dereference the address and check for this. |
| 6895 __ lw(t0, MemOperand(t9)); | 6906 __ lw(t0, MemOperand(t9)); |
| 6896 __ Assert(ne, "Received invalid return address.", t0, | 6907 __ Assert(ne, "Received invalid return address.", t0, |
| 6897 Operand(reinterpret_cast<uint32_t>(kZapValue))); | 6908 Operand(reinterpret_cast<uint32_t>(kZapValue))); |
| 6898 } | 6909 } |
| 6899 __ Jump(t9); | 6910 __ Jump(t9); |
| 6900 } | 6911 } |
| 6901 | 6912 |
| 6902 | 6913 |
| 6903 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, | 6914 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, |
| 6915 ExternalReference function) { |
| 6916 __ li(t9, Operand(function)); |
| 6917 this->GenerateCall(masm, t9); |
| 6918 } |
| 6919 |
| 6920 |
| 6921 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, |
| 6904 Register target) { | 6922 Register target) { |
| 6905 __ Move(t9, target); | 6923 __ Move(t9, target); |
| 6906 __ AssertStackIsAligned(); | 6924 __ AssertStackIsAligned(); |
| 6907 // Allocate space for arg slots. | 6925 // Allocate space for arg slots. |
| 6908 __ Subu(sp, sp, kCArgsSlotsSize); | 6926 __ Subu(sp, sp, kCArgsSlotsSize); |
| 6909 | 6927 |
| 6910 // Block the trampoline pool through the whole function to make sure the | 6928 // Block the trampoline pool through the whole function to make sure the |
| 6911 // number of generated instructions is constant. | 6929 // number of generated instructions is constant. |
| 6912 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); | 6930 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); |
| 6913 | 6931 |
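The new `GenerateCall(MacroAssembler*, ExternalReference)` overload only materializes the C function's address in t9 and defers to the register overload, so the stack-alignment assert, the argument-slot allocation, and the trampoline-pool block (which keeps the instruction count of the sequence constant) live in exactly one place. The `Subu(sp, sp, kCArgsSlotsSize)` reserves the home slots the MIPS o32 calling convention makes the caller provide for a0..a3; a sketch of that arithmetic, with values assumed for a 32-bit o32 target rather than quoted from the V8 headers:

```cpp
// Assumed o32 values for illustration; the real constants live in V8's
// MIPS assembler headers.
const int kPointerSize = 4;                                 // 32-bit MIPS
const int kCArgSlotCount = 4;                               // home slots for a0..a3
const int kCArgsSlotsSize = kCArgSlotCount * kPointerSize;  // 16 bytes

static_assert(kCArgsSlotsSize == 16,
              "an o32 caller reserves four words of argument space");
```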
| (...skipping 561 matching lines...) |
| 7475 | 7493 |
| 7476 __ bind(&need_incremental); | 7494 __ bind(&need_incremental); |
| 7477 | 7495 |
| 7478 // Fall through when we need to inform the incremental marker. | 7496 // Fall through when we need to inform the incremental marker. |
| 7479 } | 7497 } |
| 7480 | 7498 |
| 7481 | 7499 |
| 7482 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { | 7500 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { |
| 7483 // ----------- S t a t e ------------- | 7501 // ----------- S t a t e ------------- |
| 7484 // -- a0 : element value to store | 7502 // -- a0 : element value to store |
| 7503 // -- a1 : array literal |
| 7504 // -- a2 : map of array literal |
| 7485 // -- a3 : element index as smi | 7505 // -- a3 : element index as smi |
| 7486 // -- sp[0] : array literal index in function as smi | 7506 // -- t0 : array literal index in function as smi |
| 7487 // -- sp[4] : array literal | |
| 7488 // clobbers a1, a2, t0 | |
| 7489 // ----------------------------------- | 7507 // ----------------------------------- |
| 7490 | 7508 |
| 7491 Label element_done; | 7509 Label element_done; |
| 7492 Label double_elements; | 7510 Label double_elements; |
| 7493 Label smi_element; | 7511 Label smi_element; |
| 7494 Label slow_elements; | 7512 Label slow_elements; |
| 7495 Label fast_elements; | 7513 Label fast_elements; |
| 7496 | 7514 |
| 7497 // Get array literal index, array literal and its map. | |
| 7498 __ lw(t0, MemOperand(sp, 0 * kPointerSize)); | |
| 7499 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); | |
| 7500 __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset)); | |
| 7501 | |
| 7502 __ CheckFastElements(a2, t1, &double_elements); | 7515 __ CheckFastElements(a2, t1, &double_elements); |
| 7503 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements | 7516 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements |
| 7504 __ JumpIfSmi(a0, &smi_element); | 7517 __ JumpIfSmi(a0, &smi_element); |
| 7505 __ CheckFastSmiElements(a2, t1, &fast_elements); | 7518 __ CheckFastSmiElements(a2, t1, &fast_elements); |
| 7506 | 7519 |
| 7507 // Storing into the array literal requires an elements transition. Call into | 7520 // Storing into the array literal requires an elements transition. Call into |
| 7508 // the runtime. | 7521 // the runtime. |
| 7509 __ bind(&slow_elements); | 7522 __ bind(&slow_elements); |
| 7511 __ Push(a1, a3, a0); | 7524 __ Push(a1, a3, a0); |
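With the stub's inputs now arriving in registers, the three stack loads are gone and the body starts directly at the elements-kind dispatch: double-backed arrays branch off first, smi values can be stored into any remaining fast array, non-smi values into FAST_ELEMENTS arrays take the direct store path, and non-smi values into smi-only arrays fall through to the runtime transition. A plain-C++ restatement of that routing, with the enum and helper names invented for illustration (they are not V8's declarations):

```cpp
#include <cstdio>

// Illustrative stand-ins for the ElementsKind cases the stub distinguishes.
enum ElementsKind { FAST_SMI_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };
enum StorePath { FAST_STORE, SMI_STORE, DOUBLE_STORE, RUNTIME_TRANSITION };

// Mirrors CheckFastElements / JumpIfSmi / CheckFastSmiElements in order.
StorePath ChooseStorePath(ElementsKind kind, bool value_is_smi) {
  if (kind == FAST_DOUBLE_ELEMENTS) return DOUBLE_STORE;  // &double_elements
  if (value_is_smi) return SMI_STORE;                     // &smi_element
  if (kind == FAST_ELEMENTS) return FAST_STORE;           // &fast_elements
  return RUNTIME_TRANSITION;                              // &slow_elements
}

int main() {
  // Non-smi value into a smi-only array needs the runtime transition (3).
  printf("%d\n", ChooseStorePath(FAST_SMI_ELEMENTS, false));
  // Non-smi value into a plain fast array stores directly (0).
  printf("%d\n", ChooseStorePath(FAST_ELEMENTS, false));
  return 0;
}
```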
| (...skipping 403 matching lines...) |
| 7915 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); | 7928 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); |
| 7916 } | 7929 } |
| 7917 } | 7930 } |
| 7918 | 7931 |
| 7919 | 7932 |
| 7920 #undef __ | 7933 #undef __ |
| 7921 | 7934 |
| 7922 } } // namespace v8::internal | 7935 } } // namespace v8::internal |
| 7923 | 7936 |
| 7924 #endif // V8_TARGET_ARCH_MIPS | 7937 #endif // V8_TARGET_ARCH_MIPS |