OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1279 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1290 __ JumpIfNotSmi(result, &deoptimize); | 1290 __ JumpIfNotSmi(result, &deoptimize); |
1291 __ SmiUntag(result); | 1291 __ SmiUntag(result); |
1292 __ b(&done); | 1292 __ b(&done); |
1293 | 1293 |
1294 __ bind(&deoptimize); | 1294 __ bind(&deoptimize); |
1295 DeoptimizeIf(al, instr->environment()); | 1295 DeoptimizeIf(al, instr->environment()); |
1296 __ bind(&done); | 1296 __ bind(&done); |
1297 } | 1297 } |
1298 | 1298 |
1299 | 1299 |
| 1300 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {  // Emits a VFP double multiply-accumulate: result = addend + multiplier * multiplicand. |
| 1301 DwVfpRegister addend = ToDoubleRegister(instr->addend()); |
| 1302 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); |
| 1303 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
| 1304 |
| 1305 // This is computed in-place. |
| 1306 ASSERT(addend.is(ToDoubleRegister(instr->result())));  // vmla accumulates into its destination, so result must alias addend. |
| 1307 |
| 1308 __ vmla(addend, multiplier, multiplicand);  // addend <- addend + multiplier * multiplicand. |
| 1309 } |
| 1310 |
| 1311 |
1300 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { | 1312 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { |
1301 const Register result = ToRegister(instr->result()); | 1313 const Register result = ToRegister(instr->result()); |
1302 const Register left = ToRegister(instr->left()); | 1314 const Register left = ToRegister(instr->left()); |
1303 const Register remainder = ToRegister(instr->temp()); | 1315 const Register remainder = ToRegister(instr->temp()); |
1304 const Register scratch = scratch0(); | 1316 const Register scratch = scratch0(); |
1305 | 1317 |
1306 // We only optimize this for division by constants, because the standard | 1318 // We only optimize this for division by constants, because the standard |
1307 // integer division routine is usually slower than transitioning to VFP. | 1319 // integer division routine is usually slower than transitioning to VFP. |
1308 // This could be optimized on processors with SDIV available. | 1320 // This could be optimized on processors with SDIV available. |
1309 ASSERT(instr->right()->IsConstantOperand()); | 1321 ASSERT(instr->right()->IsConstantOperand()); |
(...skipping 4405 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5715 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); | 5727 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); |
5716 __ ldr(result, FieldMemOperand(scratch, | 5728 __ ldr(result, FieldMemOperand(scratch, |
5717 FixedArray::kHeaderSize - kPointerSize)); | 5729 FixedArray::kHeaderSize - kPointerSize)); |
5718 __ bind(&done); | 5730 __ bind(&done); |
5719 } | 5731 } |
5720 | 5732 |
5721 | 5733 |
5722 #undef __ | 5734 #undef __ |
5723 | 5735 |
5724 } } // namespace v8::internal | 5736 } } // namespace v8::internal |
OLD | NEW |