Index: src/mips/code-stubs-mips.cc
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index f09e9c3b2d7b6b22e7019735dc5e0bc5b66bfbbb..5c8d08c5c6caad2a67bb65319a64a463d4bf7ce6 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -2488,36 +2488,47 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
UNREACHABLE();
}
- if (result_type_ <= BinaryOpIC::INT32) {
+ if (op_ != Token::DIV) {
+ // These operations produce an integer result.
+ // Try to return a smi if we can.
+ // Otherwise return a heap number if allowed, or jump to type
+ // transition.
+
Register except_flag = scratch2;
- const FPURoundingMode kRoundingMode = op_ == Token::DIV ?
- kRoundToMinusInf : kRoundToZero;
- const CheckForInexactConversion kConversion = op_ == Token::DIV ?
- kCheckForInexactConversion : kDontCheckForInexactConversion;
- __ EmitFPUTruncate(kRoundingMode,
+ __ EmitFPUTruncate(kRoundToZero,
scratch1,
f10,
at,
f16,
- except_flag,
- kConversion);
- // If except_flag != 0, result does not fit in a 32-bit integer.
- __ Branch(&transition, ne, except_flag, Operand(zero_reg));
- // Try to tag the result as a Smi, return heap number on overflow.
- __ SmiTagCheckOverflow(scratch1, scratch1, scratch2);
+ except_flag);
+
+ if (result_type_ <= BinaryOpIC::INT32) {
+ // If except_flag != 0, result does not fit in a 32-bit integer.
+ __ Branch(&transition, ne, except_flag, Operand(zero_reg));
+ }
+
+ // Check if the result fits in a smi.
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ // If not try to return a heap number.
__ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
- // Check for minus zero, transition in that case (because we need
- // to return a heap number).
+ // Check for minus zero. Return heap number for minus zero if
+ // double results are allowed; otherwise transition.
Label not_zero;
- ASSERT(kSmiTag == 0);
__ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
__ mfc1(scratch2, f11);
__ And(scratch2, scratch2, HeapNumber::kSignMask);
- __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+ __ Branch(result_type_ <= BinaryOpIC::INT32 ? &transition
+ : &return_heap_number,
+ ne,
+ scratch2,
+ Operand(zero_reg));
__ bind(&not_zero);
+ // Tag the result and return.
__ Ret(USE_DELAY_SLOT);
- __ mov(v0, scratch1);
+ __ SmiTag(v0, scratch1); // SmiTag emits one instruction.
+ } else {
+ // DIV just falls through to allocating a heap number.
}
__ bind(&return_heap_number);
@@ -6901,6 +6912,13 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ ExternalReference function) {
+ __ li(t9, Operand(function));
+ this->GenerateCall(masm, t9);
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
__ Move(t9, target);
__ AssertStackIsAligned();
@@ -7482,10 +7500,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : element value to store
+ // -- a1 : array literal
+ // -- a2 : map of array literal
// -- a3 : element index as smi
- // -- sp[0] : array literal index in function as smi
- // -- sp[4] : array literal
- // clobbers a1, a2, t0
+ // -- t0 : array literal index in function as smi
// -----------------------------------
Label element_done;
@@ -7494,11 +7512,6 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label slow_elements;
Label fast_elements;
- // Get array literal index, array literal and its map.
- __ lw(t0, MemOperand(sp, 0 * kPointerSize));
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
- __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
-
__ CheckFastElements(a2, t1, &double_elements);
// Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
__ JumpIfSmi(a0, &smi_element);