Index: src/ia32/code-stubs-ia32.cc |
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc |
index 7ea71e47eb059b1cf788c2970eddc211e00a5a02..e92957479aae158c4d593a0889cb57d2d8d29e6b 100644 |
--- a/src/ia32/code-stubs-ia32.cc |
+++ b/src/ia32/code-stubs-ia32.cc |
@@ -735,6 +735,12 @@ class FloatingPointHelper : public AllStatic { |
static void CheckSSE2OperandsAreInt32(MacroAssembler* masm, |
Label* non_int32, |
Register scratch); |
+ |
+ static void CheckSSE2OperandIsInt32(MacroAssembler* masm, |
+ Label* non_int32, |
+ XMMRegister operand, |
+ Register scratch, |
+ XMMRegister xmm_scratch); |
}; |
@@ -755,11 +761,20 @@ static void IntegerConvert(MacroAssembler* masm, |
// Get exponent alone in scratch2. |
__ mov(scratch2, scratch); |
__ and_(scratch2, HeapNumber::kExponentMask); |
+ __ shr(scratch2, HeapNumber::kExponentShift); |
+ __ sub(scratch2, Immediate(HeapNumber::kExponentBias)); |
+ // Load ecx with zero. We use this either for the final shift or |
+ // for the answer. |
+ __ xor_(ecx, ecx); |
+ // If the exponent is above 83, the number contains no significant |
+ // bits in the range 0..2^31, so the result is zero. |
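+ // (A double has 52 mantissa bits, so its lowest set bit is at |
+ // 2^(exponent - 52); for exponents above 83 even bit 31 of the |
+ // integer part must be zero.) |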
+ static const uint32_t kResultIsZeroExponent = 83; |
+ __ cmp(scratch2, Immediate(kResultIsZeroExponent)); |
+ __ j(above, &done); |
if (use_sse3) { |
CpuFeatures::Scope scope(SSE3); |
// Check whether the exponent is too big for a 64 bit signed integer. |
- static const uint32_t kTooBigExponent = |
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; |
+ static const uint32_t kTooBigExponent = 63; |
__ cmp(scratch2, Immediate(kTooBigExponent)); |
__ j(greater_equal, conversion_failure); |
// Load x87 register with heap number. |
@@ -771,15 +786,11 @@ static void IntegerConvert(MacroAssembler* masm, |
__ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. |
__ add(esp, Immediate(sizeof(uint64_t))); // Nolint. |
} else { |
- // Load ecx with zero. We use this either for the final shift or |
- // for the answer. |
- __ xor_(ecx, ecx); |
// Check whether the exponent matches a 32 bit signed int that cannot be |
// represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the |
// exponent is 30 (biased). This is the exponent that we are fastest at and |
// also the highest exponent we can handle here. |
- const uint32_t non_smi_exponent = |
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; |
+ const uint32_t non_smi_exponent = 30; |
__ cmp(scratch2, Immediate(non_smi_exponent)); |
// If we have a match of the int32-but-not-Smi exponent then skip some |
// logic. |
@@ -791,8 +802,7 @@ static void IntegerConvert(MacroAssembler* masm, |
{ |
// Handle a big exponent. The only reason we have this code is that the |
// >>> operator has a tendency to generate numbers with an exponent of 31. |
- const uint32_t big_non_smi_exponent = |
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; |
+ const uint32_t big_non_smi_exponent = 31; |
__ cmp(scratch2, Immediate(big_non_smi_exponent)); |
__ j(not_equal, conversion_failure); |
// We have the big exponent, typically from >>>. This means the number is |
@@ -821,19 +831,8 @@ static void IntegerConvert(MacroAssembler* masm, |
} |
__ bind(&normal_exponent); |
- // Exponent word in scratch, exponent part of exponent word in scratch2. |
- // Zero in ecx. |
- // We know the exponent is smaller than 30 (biased). If it is less than |
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e. |
- // it rounds to zero. |
- const uint32_t zero_exponent = |
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; |
- __ sub(scratch2, Immediate(zero_exponent)); |
- // ecx already has a Smi zero. |
- __ j(less, &done, Label::kNear); |
- |
- // We have a shifted exponent between 0 and 30 in scratch2. |
- __ shr(scratch2, HeapNumber::kExponentShift); |
+ // Exponent word in scratch, unbiased exponent in scratch2. Zero in ecx. |
+ // We know that 0 <= exponent < 30. |
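+ // The final right shift by (30 - exponent) is therefore between |
+ // 1 and 30. |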
__ mov(ecx, Immediate(30)); |
__ sub(ecx, scratch2); |
@@ -868,8 +867,8 @@ static void IntegerConvert(MacroAssembler* masm, |
__ jmp(&done, Label::kNear); |
__ bind(&negative); |
__ sub(ecx, scratch2); |
- __ bind(&done); |
} |
+ __ bind(&done); |
} |
@@ -1192,16 +1191,17 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { |
} |
+void BinaryOpStub::Initialize() { |
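+ // On ia32 the only platform-specific bit worth recording is SSE3 |
+ // support; the shift and bit-op code paths read it back as use_sse3. |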
+ platform_specific_bit_ = CpuFeatures::IsSupported(SSE3); |
+} |
+ |
+ |
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
__ pop(ecx); // Save return address. |
__ push(edx); |
__ push(eax); |
// Left and right arguments are now on top. |
- // Push this stub's key. Although the operation and the type info are |
- // encoded into the key, the encoding is opaque, so push them too. |
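+ // The operation and the type feedback are fully encoded in the |
+ // minor key, so the patching code can decode them from it. |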
__ push(Immediate(Smi::FromInt(MinorKey()))); |
- __ push(Immediate(Smi::FromInt(op_))); |
- __ push(Immediate(Smi::FromInt(operands_type_))); |
__ push(ecx); // Push return address. |
@@ -1210,7 +1210,7 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
__ TailCallExternalReference( |
ExternalReference(IC_Utility(IC::kBinaryOp_Patch), |
masm->isolate()), |
- 5, |
+ 3, |
1); |
} |
@@ -1220,11 +1220,7 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) { |
__ pop(ecx); // Save return address. |
// Left and right arguments are already on top of the stack. |
- // Push this stub's key. Although the operation and the type info are |
- // encoded into the key, the encoding is opaque, so push them too. |
__ push(Immediate(Smi::FromInt(MinorKey()))); |
- __ push(Immediate(Smi::FromInt(op_))); |
- __ push(Immediate(Smi::FromInt(operands_type_))); |
__ push(ecx); // Push return address. |
@@ -1233,73 +1229,22 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) { |
__ TailCallExternalReference( |
ExternalReference(IC_Utility(IC::kBinaryOp_Patch), |
masm->isolate()), |
- 5, |
+ 3, |
1); |
} |
-void BinaryOpStub::Generate(MacroAssembler* masm) { |
- // Explicitly allow generation of nested stubs. It is safe here because |
- // generation code does not use any raw pointers. |
- AllowStubCallsScope allow_stub_calls(masm, true); |
- |
- switch (operands_type_) { |
- case BinaryOpIC::UNINITIALIZED: |
- GenerateTypeTransition(masm); |
- break; |
- case BinaryOpIC::SMI: |
- GenerateSmiStub(masm); |
- break; |
- case BinaryOpIC::INT32: |
- GenerateInt32Stub(masm); |
- break; |
- case BinaryOpIC::HEAP_NUMBER: |
- GenerateHeapNumberStub(masm); |
- break; |
- case BinaryOpIC::ODDBALL: |
- GenerateOddballStub(masm); |
- break; |
- case BinaryOpIC::BOTH_STRING: |
- GenerateBothStringStub(masm); |
- break; |
- case BinaryOpIC::STRING: |
- GenerateStringStub(masm); |
- break; |
- case BinaryOpIC::GENERIC: |
- GenerateGeneric(masm); |
- break; |
- default: |
- UNREACHABLE(); |
- } |
-} |
- |
- |
-void BinaryOpStub::PrintName(StringStream* stream) { |
- const char* op_name = Token::Name(op_); |
- const char* overwrite_name; |
- switch (mode_) { |
- case NO_OVERWRITE: overwrite_name = "Alloc"; break; |
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; |
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; |
- default: overwrite_name = "UnknownOverwrite"; break; |
- } |
- stream->Add("BinaryOpStub_%s_%s_%s", |
- op_name, |
- overwrite_name, |
- BinaryOpIC::GetName(operands_type_)); |
-} |
- |
- |
-void BinaryOpStub::GenerateSmiCode( |
+static void BinaryOpStub_GenerateSmiCode( |
MacroAssembler* masm, |
Label* slow, |
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
+ Token::Value op) { |
// 1. Move arguments into edx, eax except for DIV and MOD, which need the |
// dividend in eax and edx free for the division. Use eax, ebx for those. |
Comment load_comment(masm, "-- Load arguments"); |
Register left = edx; |
Register right = eax; |
- if (op_ == Token::DIV || op_ == Token::MOD) { |
+ if (op == Token::DIV || op == Token::MOD) { |
left = eax; |
right = ebx; |
__ mov(ebx, eax); |
@@ -1312,7 +1257,7 @@ void BinaryOpStub::GenerateSmiCode( |
Label not_smis; |
Register combined = ecx; |
ASSERT(!left.is(combined) && !right.is(combined)); |
- switch (op_) { |
+ switch (op) { |
case Token::BIT_OR: |
// Perform the operation into eax and smi check the result. Preserve |
// eax in case the result is not a smi. |
@@ -1356,7 +1301,7 @@ void BinaryOpStub::GenerateSmiCode( |
// eax and check the result if necessary. |
Comment perform_smi(masm, "-- Perform smi operation"); |
Label use_fp_on_smis; |
- switch (op_) { |
+ switch (op) { |
case Token::BIT_OR: |
// Nothing to do. |
break; |
@@ -1490,7 +1435,7 @@ void BinaryOpStub::GenerateSmiCode( |
} |
// 5. Emit return of result in eax. Some operations have registers pushed. |
- switch (op_) { |
+ switch (op) { |
case Token::ADD: |
case Token::SUB: |
case Token::MUL: |
@@ -1513,9 +1458,9 @@ void BinaryOpStub::GenerateSmiCode( |
// 6. For some operations emit inline code to perform floating point |
// operations on known smis (e.g., if the result of the operation |
// overflowed the smi range). |
- if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) { |
+ if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) { |
__ bind(&use_fp_on_smis); |
- switch (op_) { |
+ switch (op) { |
// Undo the effects of some operations, and some register moves. |
case Token::SHL: |
// The arguments are saved on the stack, and only used from there. |
@@ -1543,8 +1488,8 @@ void BinaryOpStub::GenerateSmiCode( |
} |
__ jmp(¬_smis); |
} else { |
- ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS); |
- switch (op_) { |
+ ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS); |
+ switch (op) { |
case Token::SHL: |
case Token::SHR: { |
Comment perform_float(masm, "-- Perform float operation on smis"); |
@@ -1555,13 +1500,13 @@ void BinaryOpStub::GenerateSmiCode( |
// Store the result in the HeapNumber and return. |
// It's OK to overwrite the arguments on the stack because we |
// are about to return. |
- if (op_ == Token::SHR) { |
+ if (op == Token::SHR) { |
__ mov(Operand(esp, 1 * kPointerSize), left); |
__ mov(Operand(esp, 2 * kPointerSize), Immediate(0)); |
__ fild_d(Operand(esp, 1 * kPointerSize)); |
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
} else { |
- ASSERT_EQ(Token::SHL, op_); |
+ ASSERT_EQ(Token::SHL, op); |
if (CpuFeatures::IsSupported(SSE2)) { |
CpuFeatures::Scope use_sse2(SSE2); |
__ cvtsi2sd(xmm0, left); |
@@ -1583,7 +1528,7 @@ void BinaryOpStub::GenerateSmiCode( |
Comment perform_float(masm, "-- Perform float operation on smis"); |
__ bind(&use_fp_on_smis); |
// Restore arguments to edx, eax. |
- switch (op_) { |
+ switch (op) { |
case Token::ADD: |
// Revert right = right + left. |
__ sub(right, left); |
@@ -1609,7 +1554,7 @@ void BinaryOpStub::GenerateSmiCode( |
if (CpuFeatures::IsSupported(SSE2)) { |
CpuFeatures::Scope use_sse2(SSE2); |
FloatingPointHelper::LoadSSE2Smis(masm, ebx); |
- switch (op_) { |
+ switch (op) { |
case Token::ADD: __ addsd(xmm0, xmm1); break; |
case Token::SUB: __ subsd(xmm0, xmm1); break; |
case Token::MUL: __ mulsd(xmm0, xmm1); break; |
@@ -1619,7 +1564,7 @@ void BinaryOpStub::GenerateSmiCode( |
__ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0); |
} else { // SSE2 not available, use FPU. |
FloatingPointHelper::LoadFloatSmis(masm, ebx); |
- switch (op_) { |
+ switch (op) { |
case Token::ADD: __ faddp(1); break; |
case Token::SUB: __ fsubp(1); break; |
case Token::MUL: __ fmulp(1); break; |
@@ -1642,7 +1587,7 @@ void BinaryOpStub::GenerateSmiCode( |
// edx and eax. |
Comment done_comment(masm, "-- Enter non-smi code"); |
__ bind(¬_smis); |
- switch (op_) { |
+ switch (op) { |
case Token::BIT_OR: |
case Token::SHL: |
case Token::SAR: |
@@ -1689,9 +1634,11 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
if (result_type_ == BinaryOpIC::UNINITIALIZED || |
result_type_ == BinaryOpIC::SMI) { |
- GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS); |
+ BinaryOpStub_GenerateSmiCode( |
+ masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_); |
} else { |
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
+ BinaryOpStub_GenerateSmiCode( |
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); |
} |
__ bind(&call_runtime); |
switch (op_) { |
@@ -1716,19 +1663,9 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
} |
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { |
- ASSERT(operands_type_ == BinaryOpIC::STRING); |
- ASSERT(op_ == Token::ADD); |
- // Try to add arguments as strings, otherwise, transition to the generic |
- // BinaryOpIC type. |
- GenerateAddStrings(masm); |
- GenerateTypeTransition(masm); |
-} |
- |
- |
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
Label call_runtime; |
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING); |
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); |
ASSERT(op_ == Token::ADD); |
// If both arguments are strings, call the string add stub. |
// Otherwise, do a transition. |
@@ -1756,6 +1693,11 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
} |
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
+ Label* alloc_failure, |
+ OverwriteMode mode); |
+ |
+ |
// Input: |
// edx: left operand (tagged) |
// eax: right operand (tagged) |
@@ -1763,7 +1705,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
// eax: result (tagged) |
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
Label call_runtime; |
- ASSERT(operands_type_ == BinaryOpIC::INT32); |
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); |
// Floating point case. |
switch (op_) { |
@@ -1776,6 +1718,18 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
Label not_int32; |
if (CpuFeatures::IsSupported(SSE2)) { |
CpuFeatures::Scope use_sse2(SSE2); |
+ // It could be that only SMIs have been seen at either the left |
+ // or the right operand. For precise type feedback, patch the IC |
+ // again if this changes. |
+ // In theory, we would need the same check in the non-SSE2 case, |
+ // but since we don't support Crankshaft on such hardware we can |
+ // afford not to care about precise type feedback. |
+ if (left_type_ == BinaryOpIC::SMI) { |
+ __ JumpIfNotSmi(edx, ¬_int32); |
+ } |
+ if (right_type_ == BinaryOpIC::SMI) { |
+ __ JumpIfNotSmi(eax, ¬_int32); |
+ } |
FloatingPointHelper::LoadSSE2Operands(masm, ¬_floats); |
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, ¬_int32, ecx); |
if (op_ == Token::MOD) { |
@@ -1798,7 +1752,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
__ test(ecx, Immediate(1)); |
__ j(zero, ¬_int32); |
} |
- GenerateHeapResultAllocation(masm, &call_runtime); |
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_); |
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
__ ret(0); |
} |
@@ -1824,7 +1778,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
default: UNREACHABLE(); |
} |
Label after_alloc_failure; |
- GenerateHeapResultAllocation(masm, &after_alloc_failure); |
+ BinaryOpStub_GenerateHeapResultAllocation( |
+ masm, &after_alloc_failure, mode_); |
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
__ ret(0); |
__ bind(&after_alloc_failure); |
@@ -1849,10 +1804,14 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
Label not_floats; |
Label not_int32; |
Label non_smi_result; |
+ // We do not check the input arguments here, as any value is |
+ // unconditionally truncated to an int32 anyway. To get the |
+ // right optimized code, int32 type feedback is just right. |
+ bool use_sse3 = platform_specific_bit_; |
FloatingPointHelper::LoadUnknownsAsIntegers(masm, |
- use_sse3_, |
+ use_sse3, |
¬_floats); |
- FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_, |
+ FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3, |
¬_int32); |
switch (op_) { |
case Token::BIT_OR: __ or_(eax, ecx); break; |
@@ -1925,44 +1884,24 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
switch (op_) { |
case Token::ADD: |
- GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |
- break; |
case Token::SUB: |
- GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |
- break; |
case Token::MUL: |
- GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |
- break; |
case Token::DIV: |
GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); |
break; |
case Token::MOD: |
- break; |
+ return; // Handled above. |
case Token::BIT_OR: |
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); |
- break; |
case Token::BIT_AND: |
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); |
- break; |
case Token::BIT_XOR: |
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); |
- break; |
case Token::SAR: |
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); |
- break; |
case Token::SHL: |
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); |
- break; |
case Token::SHR: |
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |
break; |
default: |
UNREACHABLE(); |
} |
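+ // All remaining cases fall through to the shared builtin call below. |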
+ GenerateCallRuntime(masm); |
} |
@@ -2011,7 +1950,28 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
Label not_floats; |
if (CpuFeatures::IsSupported(SSE2)) { |
CpuFeatures::Scope use_sse2(SSE2); |
+ |
+ // It could be that only SMIs have been seen at either the left |
+ // or the right operand. For precise type feedback, patch the IC |
+ // again if this changes. |
+ // In theory, we would need the same check in the non-SSE2 case, |
+ // but since we don't support Crankshaft on such hardware we can |
+ // afford not to care about precise type feedback. |
+ if (left_type_ == BinaryOpIC::SMI) { |
+ __ JumpIfNotSmi(edx, ¬_floats); |
+ } |
+ if (right_type_ == BinaryOpIC::SMI) { |
+ __ JumpIfNotSmi(eax, ¬_floats); |
+ } |
FloatingPointHelper::LoadSSE2Operands(masm, ¬_floats); |
+ if (left_type_ == BinaryOpIC::INT32) { |
+ FloatingPointHelper::CheckSSE2OperandIsInt32( |
+ masm, ¬_floats, xmm0, ecx, xmm2); |
+ } |
+ if (right_type_ == BinaryOpIC::INT32) { |
+ FloatingPointHelper::CheckSSE2OperandIsInt32( |
+ masm, ¬_floats, xmm1, ecx, xmm2); |
+ } |
switch (op_) { |
case Token::ADD: __ addsd(xmm0, xmm1); break; |
@@ -2020,7 +1980,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
case Token::DIV: __ divsd(xmm0, xmm1); break; |
default: UNREACHABLE(); |
} |
- GenerateHeapResultAllocation(masm, &call_runtime); |
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_); |
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
__ ret(0); |
} else { // SSE2 not available, use FPU. |
@@ -2037,7 +1997,8 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
default: UNREACHABLE(); |
} |
Label after_alloc_failure; |
- GenerateHeapResultAllocation(masm, &after_alloc_failure); |
+ BinaryOpStub_GenerateHeapResultAllocation( |
+ masm, &after_alloc_failure, mode_); |
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
__ ret(0); |
__ bind(&after_alloc_failure); |
@@ -2063,8 +2024,12 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
GenerateRegisterArgsPush(masm); |
Label not_floats; |
Label non_smi_result; |
+ // We do not check the input arguments here, as any value is |
+ // unconditionally truncated to an int32 anyway. To get the |
+ // right optimized code, int32 type feedback is just right. |
+ bool use_sse3 = platform_specific_bit_; |
FloatingPointHelper::LoadUnknownsAsIntegers(masm, |
- use_sse3_, |
+ use_sse3, |
¬_floats); |
switch (op_) { |
case Token::BIT_OR: __ or_(eax, ecx); break; |
@@ -2136,46 +2101,23 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
switch (op_) { |
case Token::ADD: |
- GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |
- break; |
case Token::SUB: |
- GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |
- break; |
case Token::MUL: |
- GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |
- break; |
case Token::DIV: |
- GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); |
- break; |
case Token::MOD: |
GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); |
break; |
case Token::BIT_OR: |
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); |
- break; |
case Token::BIT_AND: |
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); |
- break; |
case Token::BIT_XOR: |
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); |
- break; |
case Token::SAR: |
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); |
- break; |
case Token::SHL: |
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); |
- break; |
case Token::SHR: |
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |
break; |
default: |
UNREACHABLE(); |
} |
+ GenerateCallRuntime(masm); |
} |
@@ -2204,7 +2146,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
UNREACHABLE(); |
} |
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
+ BinaryOpStub_GenerateSmiCode( |
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); |
// Floating point case. |
switch (op_) { |
@@ -2224,7 +2167,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
case Token::DIV: __ divsd(xmm0, xmm1); break; |
default: UNREACHABLE(); |
} |
- GenerateHeapResultAllocation(masm, &call_runtime); |
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_); |
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
__ ret(0); |
} else { // SSE2 not available, use FPU. |
@@ -2241,7 +2184,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
default: UNREACHABLE(); |
} |
Label after_alloc_failure; |
- GenerateHeapResultAllocation(masm, &after_alloc_failure); |
+ BinaryOpStub_GenerateHeapResultAllocation( |
+ masm, &after_alloc_failure, mode_); |
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
__ ret(0); |
__ bind(&after_alloc_failure); |
@@ -2262,8 +2206,9 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
case Token::SHL: |
case Token::SHR: { |
Label non_smi_result; |
+ bool use_sse3 = platform_specific_bit_; |
FloatingPointHelper::LoadUnknownsAsIntegers(masm, |
- use_sse3_, |
+ use_sse3, |
&call_runtime); |
switch (op_) { |
case Token::BIT_OR: __ or_(eax, ecx); break; |
@@ -2330,48 +2275,26 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
// result. |
__ bind(&call_runtime); |
switch (op_) { |
- case Token::ADD: { |
+ case Token::ADD: |
GenerateAddStrings(masm); |
- GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |
- break; |
- } |
+ // Fall through. |
case Token::SUB: |
- GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |
- break; |
case Token::MUL: |
- GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |
- break; |
case Token::DIV: |
GenerateRegisterArgsPush(masm); |
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); |
break; |
case Token::MOD: |
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); |
- break; |
case Token::BIT_OR: |
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); |
- break; |
case Token::BIT_AND: |
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); |
- break; |
case Token::BIT_XOR: |
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); |
- break; |
case Token::SAR: |
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); |
- break; |
case Token::SHL: |
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); |
- break; |
case Token::SHR: |
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |
break; |
default: |
UNREACHABLE(); |
} |
+ GenerateCallRuntime(masm); |
} |
@@ -2407,11 +2330,10 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { |
} |
-void BinaryOpStub::GenerateHeapResultAllocation( |
- MacroAssembler* masm, |
- Label* alloc_failure) { |
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
+ Label* alloc_failure, |
+ OverwriteMode mode) { |
Label skip_allocation; |
- OverwriteMode mode = mode_; |
switch (mode) { |
case OVERWRITE_LEFT: { |
// If the argument in edx is already an object, we skip the |
@@ -2923,16 +2845,22 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, |
void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm, |
Label* non_int32, |
Register scratch) { |
- __ cvttsd2si(scratch, Operand(xmm0)); |
- __ cvtsi2sd(xmm2, scratch); |
- __ ucomisd(xmm0, xmm2); |
- __ j(not_zero, non_int32); |
- __ j(carry, non_int32); |
- __ cvttsd2si(scratch, Operand(xmm1)); |
- __ cvtsi2sd(xmm2, scratch); |
- __ ucomisd(xmm1, xmm2); |
- __ j(not_zero, non_int32); |
- __ j(carry, non_int32); |
+ CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, xmm2); |
+ CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, xmm2); |
+} |
+ |
+ |
+void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm, |
+ Label* non_int32, |
+ XMMRegister operand, |
+ Register scratch, |
+ XMMRegister xmm_scratch) { |
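+ // Convert the double to int32 and back, then compare the bit patterns |
+ // instead of using ucomisd: movmskpd bit 0 holds the pcmpeqd result for |
+ // the upper halves (sign, exponent and high mantissa bits), so NaN, -0 |
+ // and out-of-range values are all rejected. |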
+ __ cvttsd2si(scratch, Operand(operand)); |
+ __ cvtsi2sd(xmm_scratch, scratch); |
+ __ pcmpeqd(xmm_scratch, operand); |
+ __ movmskpd(scratch, xmm_scratch); |
+ __ test(scratch, Immediate(1)); |
+ __ j(zero, non_int32); |
} |
@@ -4321,30 +4249,59 @@ static int NegativeComparisonResult(Condition cc) { |
return (cc == greater || cc == greater_equal) ? LESS : GREATER; |
} |
-void CompareStub::Generate(MacroAssembler* masm) { |
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); |
+static void CheckInputType(MacroAssembler* masm, |
+ Register input, |
+ CompareIC::State expected, |
+ Label* fail) { |
+ Label ok; |
+ if (expected == CompareIC::SMI) { |
+ __ JumpIfNotSmi(input, fail); |
+ } else if (expected == CompareIC::HEAP_NUMBER) { |
+ __ JumpIfSmi(input, &ok); |
+ __ cmp(FieldOperand(input, HeapObject::kMapOffset), |
+ Immediate(masm->isolate()->factory()->heap_number_map())); |
+ __ j(not_equal, fail); |
+ } |
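+ // Note that in the HEAP_NUMBER case a smi input is accepted as well. |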
+ // We could be strict about symbol/string here, but as long as |
+ // hydrogen doesn't care, the stub doesn't have to care either. |
+ __ bind(&ok); |
+} |
+ |
+ |
+static void BranchIfNonSymbol(MacroAssembler* masm, |
+ Label* label, |
+ Register object, |
+ Register scratch) { |
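+ // The instance type denotes a symbol iff the not-string bits are clear |
+ // and the symbol bit is set; mask out everything else before comparing. |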
+ __ JumpIfSmi(object, label); |
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset)); |
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); |
+ __ and_(scratch, kIsSymbolMask | kIsNotStringMask); |
+ __ cmp(scratch, kSymbolTag | kStringTag); |
+ __ j(not_equal, label); |
+} |
+ |
+ |
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { |
Label check_unequal_objects; |
+ Condition cc = GetCondition(); |
- // Compare two smis if required. |
- if (include_smi_compare_) { |
- Label non_smi, smi_done; |
- __ mov(ecx, edx); |
- __ or_(ecx, eax); |
- __ JumpIfNotSmi(ecx, &non_smi, Label::kNear); |
- __ sub(edx, eax); // Return on the result of the subtraction. |
- __ j(no_overflow, &smi_done, Label::kNear); |
- __ not_(edx); // Correct sign in case of overflow. edx is never 0 here. |
- __ bind(&smi_done); |
- __ mov(eax, edx); |
- __ ret(0); |
- __ bind(&non_smi); |
- } else if (FLAG_debug_code) { |
- __ mov(ecx, edx); |
- __ or_(ecx, eax); |
- __ test(ecx, Immediate(kSmiTagMask)); |
- __ Assert(not_zero, "Unexpected smi operands."); |
- } |
+ Label miss; |
+ CheckInputType(masm, edx, left_, &miss); |
+ CheckInputType(masm, eax, right_, &miss); |
+ |
+ // Compare two smis. |
+ Label non_smi, smi_done; |
+ __ mov(ecx, edx); |
+ __ or_(ecx, eax); |
+ __ JumpIfNotSmi(ecx, &non_smi, Label::kNear); |
+ __ sub(edx, eax); // Return the result of the subtraction. |
+ __ j(no_overflow, &smi_done, Label::kNear); |
+ __ not_(edx); // Correct sign in case of overflow. edx is never 0 here. |
+ __ bind(&smi_done); |
+ __ mov(eax, edx); |
+ __ ret(0); |
+ __ bind(&non_smi); |
// NOTICE! This code is only reached after a smi-fast-case check, so |
// it is certain that at least one operand isn't a smi. |
@@ -4356,67 +4313,61 @@ void CompareStub::Generate(MacroAssembler* masm) { |
__ cmp(eax, edx); |
__ j(not_equal, ¬_identical); |
- if (cc_ != equal) { |
+ if (cc != equal) { |
// Check for undefined. undefined OP undefined is false even though |
// undefined == undefined. |
Label check_for_nan; |
__ cmp(edx, masm->isolate()->factory()->undefined_value()); |
__ j(not_equal, &check_for_nan, Label::kNear); |
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); |
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc)))); |
__ ret(0); |
__ bind(&check_for_nan); |
} |
// Test for NaN. Sadly, we can't just compare to factory->nan_value(), |
// so we do the second best thing - test it ourselves. |
- // Note: if cc_ != equal, never_nan_nan_ is not used. |
- if (never_nan_nan_ && (cc_ == equal)) { |
- __ Set(eax, Immediate(Smi::FromInt(EQUAL))); |
+ Label heap_number; |
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset), |
+ Immediate(masm->isolate()->factory()->heap_number_map())); |
+ __ j(equal, &heap_number, Label::kNear); |
+ if (cc != equal) { |
+ // Call runtime on identical JSObjects. Otherwise return equal. |
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); |
+ __ j(above_equal, ¬_identical); |
+ } |
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL))); |
+ __ ret(0); |
+ |
+ __ bind(&heap_number); |
+ // It is a heap number, so return non-equal if it's NaN and equal if |
+ // it's not NaN. |
+ // The representation of NaN values has all exponent bits (52..62) set, |
+ // and not all mantissa bits (0..51) clear. |
+ // We only accept QNaNs, which have bit 51 set. |
+ // Read top bits of double representation (second word of value). |
+ |
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e., |
+ // all bits in the mask are set. We only need to check the word |
+ // that contains the exponent and high bit of the mantissa. |
+ STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0); |
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset)); |
+ __ Set(eax, Immediate(0)); |
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost |
+ // bits. |
+ __ add(edx, edx); |
+ __ cmp(edx, kQuietNaNHighBitsMask << 1); |
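+ // Adding the value to itself shifts out the sign bit, so negative NaNs |
+ // are handled too; only words with all exponent bits and the top |
+ // mantissa bit set (i.e. quiet NaNs) reach or exceed the doubled mask. |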
+ if (cc == equal) { |
+ STATIC_ASSERT(EQUAL != 1); |
+ __ setcc(above_equal, eax); |
__ ret(0); |
} else { |
- Label heap_number; |
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset), |
- Immediate(masm->isolate()->factory()->heap_number_map())); |
- __ j(equal, &heap_number, Label::kNear); |
- if (cc_ != equal) { |
- // Call runtime on identical JSObjects. Otherwise return equal. |
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); |
- __ j(above_equal, ¬_identical); |
- } |
+ Label nan; |
+ __ j(above_equal, &nan, Label::kNear); |
__ Set(eax, Immediate(Smi::FromInt(EQUAL))); |
__ ret(0); |
- |
- __ bind(&heap_number); |
- // It is a heap number, so return non-equal if it's NaN and equal if |
- // it's not NaN. |
- // The representation of NaN values has all exponent bits (52..62) set, |
- // and not all mantissa bits (0..51) clear. |
- // We only accept QNaNs, which have bit 51 set. |
- // Read top bits of double representation (second word of value). |
- |
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e., |
- // all bits in the mask are set. We only need to check the word |
- // that contains the exponent and high bit of the mantissa. |
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0); |
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset)); |
- __ Set(eax, Immediate(0)); |
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost |
- // bits. |
- __ add(edx, edx); |
- __ cmp(edx, kQuietNaNHighBitsMask << 1); |
- if (cc_ == equal) { |
- STATIC_ASSERT(EQUAL != 1); |
- __ setcc(above_equal, eax); |
- __ ret(0); |
- } else { |
- Label nan; |
- __ j(above_equal, &nan, Label::kNear); |
- __ Set(eax, Immediate(Smi::FromInt(EQUAL))); |
- __ ret(0); |
- __ bind(&nan); |
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); |
- __ ret(0); |
- } |
+ __ bind(&nan); |
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc)))); |
+ __ ret(0); |
} |
__ bind(¬_identical); |
@@ -4424,7 +4375,7 @@ void CompareStub::Generate(MacroAssembler* masm) { |
// Strict equality can quickly decide whether objects are equal. |
// Non-strict object equality is slower, so it is handled later in the stub. |
- if (cc_ == equal && strict_) { |
+ if (cc == equal && strict()) { |
Label slow; // Fallthrough label. |
Label not_smis; |
// If we're doing a strict equality comparison, we don't have to do |
@@ -4495,70 +4446,68 @@ void CompareStub::Generate(MacroAssembler* masm) { |
} |
// Generate the number comparison code. |
- if (include_number_compare_) { |
- Label non_number_comparison; |
- Label unordered; |
- if (CpuFeatures::IsSupported(SSE2)) { |
- CpuFeatures::Scope use_sse2(SSE2); |
- CpuFeatures::Scope use_cmov(CMOV); |
- |
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); |
- __ ucomisd(xmm0, xmm1); |
+ Label non_number_comparison; |
+ Label unordered; |
+ if (CpuFeatures::IsSupported(SSE2)) { |
+ CpuFeatures::Scope use_sse2(SSE2); |
+ CpuFeatures::Scope use_cmov(CMOV); |
- // Don't base result on EFLAGS when a NaN is involved. |
- __ j(parity_even, &unordered, Label::kNear); |
- // Return a result of -1, 0, or 1, based on EFLAGS. |
- __ mov(eax, 0); // equal |
- __ mov(ecx, Immediate(Smi::FromInt(1))); |
- __ cmov(above, eax, ecx); |
- __ mov(ecx, Immediate(Smi::FromInt(-1))); |
- __ cmov(below, eax, ecx); |
- __ ret(0); |
- } else { |
- FloatingPointHelper::CheckFloatOperands( |
- masm, &non_number_comparison, ebx); |
- FloatingPointHelper::LoadFloatOperand(masm, eax); |
- FloatingPointHelper::LoadFloatOperand(masm, edx); |
- __ FCmp(); |
+ FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); |
+ __ ucomisd(xmm0, xmm1); |
- // Don't base result on EFLAGS when a NaN is involved. |
- __ j(parity_even, &unordered, Label::kNear); |
+ // Don't base result on EFLAGS when a NaN is involved. |
+ __ j(parity_even, &unordered, Label::kNear); |
+ // Return a result of -1, 0, or 1, based on EFLAGS. |
+ __ mov(eax, 0); // equal |
+ __ mov(ecx, Immediate(Smi::FromInt(1))); |
+ __ cmov(above, eax, ecx); |
+ __ mov(ecx, Immediate(Smi::FromInt(-1))); |
+ __ cmov(below, eax, ecx); |
+ __ ret(0); |
+ } else { |
+ FloatingPointHelper::CheckFloatOperands( |
+ masm, &non_number_comparison, ebx); |
+ FloatingPointHelper::LoadFloatOperand(masm, eax); |
+ FloatingPointHelper::LoadFloatOperand(masm, edx); |
+ __ FCmp(); |
- Label below_label, above_label; |
- // Return a result of -1, 0, or 1, based on EFLAGS. |
- __ j(below, &below_label, Label::kNear); |
- __ j(above, &above_label, Label::kNear); |
+ // Don't base result on EFLAGS when a NaN is involved. |
+ __ j(parity_even, &unordered, Label::kNear); |
- __ Set(eax, Immediate(0)); |
- __ ret(0); |
+ Label below_label, above_label; |
+ // Return a result of -1, 0, or 1, based on EFLAGS. |
+ __ j(below, &below_label, Label::kNear); |
+ __ j(above, &above_label, Label::kNear); |
- __ bind(&below_label); |
- __ mov(eax, Immediate(Smi::FromInt(-1))); |
- __ ret(0); |
+ __ Set(eax, Immediate(0)); |
+ __ ret(0); |
- __ bind(&above_label); |
- __ mov(eax, Immediate(Smi::FromInt(1))); |
- __ ret(0); |
- } |
+ __ bind(&below_label); |
+ __ mov(eax, Immediate(Smi::FromInt(-1))); |
+ __ ret(0); |
- // If one of the numbers was NaN, then the result is always false. |
- // The cc is never not-equal. |
- __ bind(&unordered); |
- ASSERT(cc_ != not_equal); |
- if (cc_ == less || cc_ == less_equal) { |
- __ mov(eax, Immediate(Smi::FromInt(1))); |
- } else { |
- __ mov(eax, Immediate(Smi::FromInt(-1))); |
- } |
+ __ bind(&above_label); |
+ __ mov(eax, Immediate(Smi::FromInt(1))); |
__ ret(0); |
+ } |
- // The number comparison code did not provide a valid result. |
- __ bind(&non_number_comparison); |
+ // If one of the numbers was NaN, then the result is always false. |
+ // The cc is never not-equal. |
+ __ bind(&unordered); |
+ ASSERT(cc != not_equal); |
+ if (cc == less || cc == less_equal) { |
+ __ mov(eax, Immediate(Smi::FromInt(1))); |
+ } else { |
+ __ mov(eax, Immediate(Smi::FromInt(-1))); |
} |
+ __ ret(0); |
+ |
+ // The number comparison code did not provide a valid result. |
+ __ bind(&non_number_comparison); |
// Fast negative check for symbol-to-symbol equality. |
Label check_for_strings; |
- if (cc_ == equal) { |
+ if (cc == equal) { |
BranchIfNonSymbol(masm, &check_for_strings, eax, ecx); |
BranchIfNonSymbol(masm, &check_for_strings, edx, ecx); |
@@ -4574,7 +4523,7 @@ void CompareStub::Generate(MacroAssembler* masm) { |
&check_unequal_objects); |
// Inline comparison of ASCII strings. |
- if (cc_ == equal) { |
+ if (cc == equal) { |
StringCompareStub::GenerateFlatAsciiStringEquals(masm, |
edx, |
eax, |
@@ -4593,7 +4542,7 @@ void CompareStub::Generate(MacroAssembler* masm) { |
#endif |
__ bind(&check_unequal_objects); |
- if (cc_ == equal && !strict_) { |
+ if (cc == equal && !strict()) { |
// Non-strict equality. Objects are unequal if |
// they are both JSObjects and not undetectable, |
// and their pointers are different. |
@@ -4637,11 +4586,11 @@ void CompareStub::Generate(MacroAssembler* masm) { |
// Figure out which native to call and setup the arguments. |
Builtins::JavaScript builtin; |
- if (cc_ == equal) { |
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; |
+ if (cc == equal) { |
+ builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; |
} else { |
builtin = Builtins::COMPARE; |
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); |
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc)))); |
} |
// Restore return address on the stack. |
@@ -4650,19 +4599,9 @@ void CompareStub::Generate(MacroAssembler* masm) { |
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
// tagged as a small integer. |
__ InvokeBuiltin(builtin, JUMP_FUNCTION); |
-} |
- |
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm, |
- Label* label, |
- Register object, |
- Register scratch) { |
- __ JumpIfSmi(object, label); |
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset)); |
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); |
- __ and_(scratch, kIsSymbolMask | kIsNotStringMask); |
- __ cmp(scratch, kSymbolTag | kStringTag); |
- __ j(not_equal, label); |
+ __ bind(&miss); |
+ GenerateMiss(masm); |
} |
@@ -5407,44 +5346,6 @@ Register InstanceofStub::left() { return eax; } |
Register InstanceofStub::right() { return edx; } |
-int CompareStub::MinorKey() { |
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate |
- // stubs the never NaN NaN condition is only taken into account if the |
- // condition is equals. |
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12)); |
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); |
- return ConditionField::encode(static_cast<unsigned>(cc_)) |
- | RegisterField::encode(false) // lhs_ and rhs_ are not used |
- | StrictField::encode(strict_) |
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false) |
- | IncludeNumberCompareField::encode(include_number_compare_) |
- | IncludeSmiCompareField::encode(include_smi_compare_); |
-} |
- |
- |
-// Unfortunately you have to run without snapshots to see most of these |
-// names in the profile since most compare stubs end up in the snapshot. |
-void CompareStub::PrintName(StringStream* stream) { |
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); |
- const char* cc_name; |
- switch (cc_) { |
- case less: cc_name = "LT"; break; |
- case greater: cc_name = "GT"; break; |
- case less_equal: cc_name = "LE"; break; |
- case greater_equal: cc_name = "GE"; break; |
- case equal: cc_name = "EQ"; break; |
- case not_equal: cc_name = "NE"; break; |
- default: cc_name = "UnknownCondition"; break; |
- } |
- bool is_equality = cc_ == equal || cc_ == not_equal; |
- stream->Add("CompareStub_%s", cc_name); |
- if (strict_ && is_equality) stream->Add("_STRICT"); |
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); |
- if (!include_number_compare_) stream->Add("_NO_NUMBER"); |
- if (!include_smi_compare_) stream->Add("_NO_SMI"); |
-} |
- |
- |
// ------------------------------------------------------------------------- |
// StringCharCodeAtGenerator |
@@ -6606,7 +6507,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { |
void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
- ASSERT(state_ == CompareIC::SMIS); |
+ ASSERT(state_ == CompareIC::SMI); |
Label miss; |
__ mov(ecx, edx); |
__ or_(ecx, eax); |
@@ -6632,31 +6533,52 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { |
- ASSERT(state_ == CompareIC::HEAP_NUMBERS); |
+ ASSERT(state_ == CompareIC::HEAP_NUMBER); |
Label generic_stub; |
Label unordered, maybe_undefined1, maybe_undefined2; |
Label miss; |
- __ mov(ecx, edx); |
- __ and_(ecx, eax); |
- __ JumpIfSmi(ecx, &generic_stub, Label::kNear); |
- __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx); |
- __ j(not_equal, &maybe_undefined1, Label::kNear); |
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx); |
- __ j(not_equal, &maybe_undefined2, Label::kNear); |
+ if (left_ == CompareIC::SMI) { |
+ __ JumpIfNotSmi(edx, &miss); |
+ } |
+ if (right_ == CompareIC::SMI) { |
+ __ JumpIfNotSmi(eax, &miss); |
+ } |
// Inlining the double comparison and falling back to the general compare |
- // stub if NaN is involved or SS2 or CMOV is unsupported. |
+ // stub if NaN is involved or SSE2 or CMOV is unsupported. |
if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) { |
CpuFeatures::Scope scope1(SSE2); |
CpuFeatures::Scope scope2(CMOV); |
- // Load left and right operand |
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); |
+ // Load left and right operands. |
+ Label done, left, left_smi, right_smi; |
+ __ JumpIfSmi(eax, &right_smi, Label::kNear); |
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset), |
+ masm->isolate()->factory()->heap_number_map()); |
+ __ j(not_equal, &maybe_undefined1, Label::kNear); |
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); |
+ __ jmp(&left, Label::kNear); |
+ __ bind(&right_smi); |
+ __ mov(ecx, eax); // Can't clobber eax because we can still jump away. |
+ __ SmiUntag(ecx); |
+ __ cvtsi2sd(xmm1, ecx); |
+ |
+ __ bind(&left); |
+ __ JumpIfSmi(edx, &left_smi, Label::kNear); |
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset), |
+ masm->isolate()->factory()->heap_number_map()); |
+ __ j(not_equal, &maybe_undefined2, Label::kNear); |
+ __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); |
+ __ jmp(&done); |
+ __ bind(&left_smi); |
+ __ mov(ecx, edx); // Can't clobber edx because we can still jump away. |
+ __ SmiUntag(ecx); |
+ __ cvtsi2sd(xmm0, ecx); |
- // Compare operands |
+ __ bind(&done); |
+ // Compare operands. |
__ ucomisd(xmm0, xmm1); |
// Don't base result on EFLAGS when a NaN is involved. |
@@ -6670,17 +6592,30 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { |
__ mov(ecx, Immediate(Smi::FromInt(-1))); |
__ cmov(below, eax, ecx); |
__ ret(0); |
+ } else { |
+ __ mov(ecx, edx); |
+ __ and_(ecx, eax); |
+ __ JumpIfSmi(ecx, &generic_stub, Label::kNear); |
+ |
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset), |
+ masm->isolate()->factory()->heap_number_map()); |
+ __ j(not_equal, &maybe_undefined1, Label::kNear); |
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset), |
+ masm->isolate()->factory()->heap_number_map()); |
+ __ j(not_equal, &maybe_undefined2, Label::kNear); |
} |
__ bind(&unordered); |
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS); |
__ bind(&generic_stub); |
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, |
+ CompareIC::GENERIC); |
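+ // Fall back to the generic stub for all remaining combinations. |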
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); |
__ bind(&maybe_undefined1); |
if (Token::IsOrderedRelationalCompareOp(op_)) { |
__ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value())); |
__ j(not_equal, &miss); |
+ __ JumpIfSmi(edx, &unordered); |
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx); |
__ j(not_equal, &maybe_undefined2, Label::kNear); |
__ jmp(&unordered); |
@@ -6698,7 +6633,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { |
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { |
- ASSERT(state_ == CompareIC::SYMBOLS); |
+ ASSERT(state_ == CompareIC::SYMBOL); |
ASSERT(GetCondition() == equal); |
// Registers containing left and right operands respectively. |
@@ -6743,7 +6678,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { |
void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
- ASSERT(state_ == CompareIC::STRINGS); |
+ ASSERT(state_ == CompareIC::STRING); |
Label miss; |
bool equality = Token::IsEqualityOp(op_); |
@@ -6832,7 +6767,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
- ASSERT(state_ == CompareIC::OBJECTS); |
+ ASSERT(state_ == CompareIC::OBJECT); |
Label miss; |
__ mov(ecx, edx); |
__ and_(ecx, eax); |