Index: src/arm/stub-cache-arm.cc
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 5d10a7e73bce9df195aabe1e89c60dc4902d55fd..40b3313022569ade67df97e7c9857a513dde9005 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1680,8 +1680,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
 
   // Get the array's length into r0 and calculate new length.
   __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kSmiTag == 0);
   __ add(r0, r0, Operand(Smi::FromInt(argc)));
 
   // Get the elements' length.
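
The two STATIC_ASSERTs deleted here (and in the matching hunks below) restated the smi encoding at each call site. For reference, the 32-bit tagging scheme they pin down, reconstructed from the constants visible in this patch:

    // Assumed layout (kSmiTag == 0, kSmiTagSize == 1, from the deleted
    // asserts; kHeapObjectTag from the -kHeapObjectTag offsets below):
    //   smi n:        (n << 1) | 0   -- upper 31 bits carry the value
    //   heap object:  pointer  | 1
    // After this patch those invariants are presumably asserted once,
    // inside the new macro-assembler helpers, not per call site.
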
@@ -1701,8 +1699,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
   // Store the value.
   // We may need a register containing the address end_elements below,
   // so write back the value in end_elements.
-  __ add(end_elements, elements,
-         Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
   const int kEndElementsOffset =
       FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
   __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
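
Operand::PointerOffsetFromSmiKey packages the shift that the removed lines spelled out. A minimal sketch of what it plausibly expands to, inferred purely from the code it replaces (the real definition lives in the macro assembler, not in this patch):

    // Sketch only: scale a smi index straight to a byte offset. The
    // 1-bit smi tag is absorbed into the pointer-size shift, so no
    // separate untag instruction is needed.
    static Operand PointerOffsetFromSmiKey(Register key) {
      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
      return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize);
    }

Since the shifted register rides in the ADD's flexible second operand, old and new source should emit the same single instruction; the change centralizes the invariants rather than altering codegen.
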
@@ -1722,8 +1719,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
 
   // Get the array's length into r0 and calculate new length.
   __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kSmiTag == 0);
   __ add(r0, r0, Operand(Smi::FromInt(argc)));
 
   // Get the elements' length.
@@ -1797,8 +1792,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
   // Store the value.
   // We may need a register containing the address end_elements below,
   // so write back the value in end_elements.
-  __ add(end_elements, elements,
-         Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
   __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
 
   __ RecordWrite(elements,
@@ -1835,8 +1829,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
 
   const int kAllocationDelta = 4;
   // Load top and check if it is the end of elements.
-  __ add(end_elements, elements,
-         Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
   __ add(end_elements, end_elements, Operand(kEndElementsOffset));
   __ mov(r7, Operand(new_space_allocation_top));
   __ ldr(r3, MemOperand(r7));
@@ -1932,11 +1925,9 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
 
   // Get the last element.
   __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kSmiTag == 0);
   // We can't address the last element in one operation. Compute the more
   // expensive shift first, and use an offset later on.
-  __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(elements, elements, Operand::PointerOffsetFromSmiKey(r4));
   __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
   __ cmp(r0, r6);
   __ b(eq, &call_builtin);
@@ -2158,7 +2149,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
   if (cell.is_null()) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
 
-    STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
 
     CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
@@ -2176,7 +2166,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
 
   // Check the code is a smi.
   Label slow;
-  STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfNotSmi(code, &slow);
 
   // Convert the smi code to uint16.
@@ -2230,7 +2219,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
 
   if (cell.is_null()) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-    STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
     CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
                     name, &miss);
@@ -2245,8 +2233,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
   __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
 
   // If the argument is a smi, just return.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ tst(r0, Operand(kSmiTagMask));
+  __ SmiTst(r0);
   __ Drop(argc + 1, eq);
   __ Ret(eq);
 
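
SmiTst replaces the open-coded tag test; the eq-predicated Drop/Ret above still rely on eq meaning "tag bit clear, hence a smi". Presumably a thin wrapper over the removed instruction (sketch, not the actual definition):

    // Sketch: test the tag bit; eq <=> value is a smi (kSmiTag == 0).
    void MacroAssembler::SmiTst(Register value) {
      tst(value, Operand(kSmiTagMask));
    }
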
@@ -2292,11 +2279,9 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
   __ bind(&smi_check);
   // Check if the result can fit into an smi. If we had an overflow,
   // the result is either 0x80000000 or 0x7FFFFFFF and won't fit into an smi.
-  __ add(r1, r0, Operand(0x40000000), SetCC);
   // If result doesn't fit into an smi, branch to slow.
-  __ b(&slow, mi);
-  // Tag the result.
-  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+  __ SmiTag(r0, SetCC);
+  __ b(vs, &slow);
 
   __ bind(&just_return);
   __ Drop(argc + 1);
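
The removed sequence probed for 31-bit overflow before tagging: r0 + 0x40000000 goes negative exactly when r0 lies outside the smi range [-2^30, 2^30), hence the mi branch, followed by a separate tagging shift. The replacement gets the same check from the tagging instruction itself; for SetCC to be meaningful, SmiTag presumably tags with a self-add rather than a shift (sketch, assuming that implementation):

    // Sketch: reg + reg == reg << kSmiTagSize, but ADD sets the V flag
    // on signed overflow while a MOV-with-shift would not, so one
    // instruction both tags the value and detects the 0x80000000 /
    // 0x7FFFFFFF out-of-range cases mentioned in the comment above.
    void MacroAssembler::SmiTag(Register reg, SBit s) {
      add(reg, reg, Operand(reg), s);
    }

With that, b(vs, &slow) covers both former failure modes in one conditional branch.
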
@@ -2341,7 +2326,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
   GenerateNameCheck(name, &miss);
   if (cell.is_null()) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-    STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
     CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
                     name, &miss);
@@ -2357,7 +2341,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
 
   // Check if the argument is a smi.
   Label not_smi;
-  STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfNotSmi(r0, &not_smi);
 
   // Do bitwise not or do nothing depending on the sign of the
@@ -3237,8 +3220,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
   Register key = r0;
   Register receiver = r1;
 
-  __ JumpIfNotSmi(key, &miss_force_generic);
-  __ mov(r2, Operand(key, ASR, kSmiTagSize));
+  __ UntagAndJumpIfNotSmi(r2, key, &miss_force_generic);
   __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
   __ Ret();
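
UntagAndJumpIfNotSmi fuses the removed test-and-shift pair. On ARM the fusion is natural because a flag-setting shift exposes the tag bit directly: with MOVS, the last bit shifted out lands in the carry flag. A sketch under that assumption (the actual helper is defined in the macro assembler):

    // Sketch: untag and test in one MOVS. The smi tag (the LSB) is the
    // last bit shifted out by ASR #1, so C set <=> not a smi.
    void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
                                              Label* non_smi_case) {
      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
      mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
      b(cs, non_smi_case);
    }

As before, key itself stays tagged for the LoadFromNumberDictionary call; only the copy in r2 is untagged, matching the old mov.
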
@@ -3270,7 +3252,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
 static void GenerateSmiKeyCheck(MacroAssembler* masm,
                                 Register key,
                                 Register scratch0,
-                                Register scratch1,
                                 DwVfpRegister double_scratch0,
                                 DwVfpRegister double_scratch1,
                                 Label* fail) {
@@ -3288,8 +3269,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
   __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
   __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
   __ b(ne, fail);
-  __ TrySmiTag(scratch0, fail, scratch1);
-  __ mov(key, scratch0);
+  __ TrySmiTag(key, scratch0, fail);
   __ bind(&key_ok);
 }
 
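
TrySmiTag now takes an explicit destination, which is what lets GenerateSmiKeyCheck drop both its scratch1 parameter and the trailing mov. A sketch of the assumed new shape, reusing the flag-setting self-add idiom from the CompileMathFloorCall hunk above:

    // Sketch: tag src into dst, bailing out if the int32 needs more
    // than 31 bits. Staging through ip (the assembler scratch) leaves
    // dst untouched on the failure path; no caller scratch needed.
    void MacroAssembler::TrySmiTag(Register dst, Register src,
                                   Label* not_a_smi) {
      add(ip, src, Operand(src), SetCC);  // Tag; V set on overflow.
      b(vs, not_a_smi);
      mov(dst, ip);
    }
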
@@ -3315,7 +3295,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   // have been verified by the caller to not be a smi.
 
   // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
+  GenerateSmiKeyCheck(masm, key, r4, d1, d2, &miss_force_generic);
 
   __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
 
@@ -3330,11 +3310,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   // r3: external array.
   if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
     // Double to pixel conversion is only implemented in the runtime for now.
-    __ JumpIfNotSmi(value, &slow);
+    __ UntagAndJumpIfNotSmi(r5, value, &slow);
   } else {
-    __ JumpIfNotSmi(value, &check_heap_number);
+    __ UntagAndJumpIfNotSmi(r5, value, &check_heap_number);
   }
-  __ SmiUntag(r5, value);
   __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
 
   // r3: base pointer of external storage.
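
The same fusion applied to the external-array store: each branch now tests and untags value into r5 in one step, retiring the shared SmiUntag that previously ran after the branches rejoined. In outline:

    // Before: __ JumpIfNotSmi(value, target);  // test only, per branch
    //         ...
    //         __ SmiUntag(r5, value);          // one shared untag
    // After:  __ UntagAndJumpIfNotSmi(r5, value, target);  // per branch

Under the MOVS-based sketch earlier, this saves one instruction on the smi fast path at the cost of repeating the untag in each branch.
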
@@ -3505,7 +3484,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   // have been verified by the caller to not be a smi.
 
   // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
+  GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
 
   if (IsFastSmiElementsKind(elements_kind)) {
     __ JumpIfNotSmi(value_reg, &transition_elements_kind);
@@ -3539,20 +3518,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
     __ add(scratch,
            elements_reg,
            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-    __ add(scratch,
-           scratch,
-           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
     __ str(value_reg, MemOperand(scratch));
   } else {
     ASSERT(IsFastObjectElementsKind(elements_kind));
     __ add(scratch,
            elements_reg,
            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-    __ add(scratch,
-           scratch,
-           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
     __ str(value_reg, MemOperand(scratch));
     __ mov(receiver_reg, value_reg);
     __ RecordWrite(elements_reg,  // Object.
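
These two branches repeat the PointerOffsetFromSmiKey collapse from CompileArrayPushCall, once per fast-element store path. As there, the expectation (an assumption based on the removed lines, not verified against emitted code) is that nothing changes at the instruction level:

    //   Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)  // before
    //   Operand::PointerOffsetFromSmiKey(key_reg)              // after
    // Both are one ADD with a barrel-shifted operand; the four-line
    // spelling and its STATIC_ASSERT fold into the helper.
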
@@ -3666,7 +3639,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   // have been verified by the caller to not be a smi.
 
   // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
+  GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
 
   __ ldr(elements_reg,
          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));