Chromium Code Reviews

Unified Diff: src/arm/macro-assembler-arm.cc

Issue 10824235: Fix the full compiler on ARM to always generate the same code (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 8 years, 4 months ago
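
Description: to make the full compiler emit the same code regardless of which CPU features are detected at run time, this patch threads the assembler into the Operand size predicates (is_single_instruction, must_use_constant_pool) and gates the ARMv7-only instruction selections (ubfx, sbfx, bfi, bfc, usat, ldrd, strd) behind a predictable_code_size() check, falling back to the feature-independent multi-instruction sequences when the flag is set. A CallSizeNotPredictableSize variant is added for size queries that should ignore the flag.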
@@ -1,10 +1,10 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 119 matching lines...)
@@ -130,21 +130,33 @@
 #endif
   ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
 }
 
 
 int MacroAssembler::CallSize(
     Address target, RelocInfo::Mode rmode, Condition cond) {
   int size = 2 * kInstrSize;
   Instr mov_instr = cond | MOV | LeaveCC;
   intptr_t immediate = reinterpret_cast<intptr_t>(target);
-  if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
+  if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
+    size += kInstrSize;
+  }
+  return size;
+}
+
+
+int MacroAssembler::CallSizeNotPredictableSize(
+    Address target, RelocInfo::Mode rmode, Condition cond) {
+  int size = 2 * kInstrSize;
+  Instr mov_instr = cond | MOV | LeaveCC;
+  intptr_t immediate = reinterpret_cast<intptr_t>(target);
+  if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
     size += kInstrSize;
   }
   return size;
 }
 
 
 void MacroAssembler::Call(Address target,
                           RelocInfo::Mode rmode,
                           Condition cond) {
   // Block constant pool for the call instruction sequence.
(...skipping 118 matching lines...)
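
Note: is_single_instruction() now receives the assembler so it can consult predictable_code_size() when deciding whether loading the call target takes one instruction; CallSizeNotPredictableSize passes NULL to bypass the flag. The underlying encoding question is whether the target fits an ARM data-processing immediate (an 8-bit value rotated right by an even amount); when it does not, ARMv7 can use a movw/movt pair at the cost of one extra word, which is where the extra kInstrSize comes from. A standalone sketch of that encodability test (plain C++, not V8 code):

    #include <cstdint>

    // Sketch: true if imm encodes as an ARM data-processing immediate,
    // i.e. an 8-bit value rotated right by an even amount. When this
    // fails, the mov needs movw/movt (or a constant-pool load), so
    // CallSize adds one more kInstrSize.
    bool EncodesAsArmImmediate(uint32_t imm) {
      for (int rot = 0; rot < 32; rot += 2) {
        // Rotating imm left by rot undoes a rotate-right-by-rot encoding.
        uint32_t undone = (imm << rot) | (rot == 0 ? 0u : imm >> (32 - rot));
        if (undone <= 0xffu) return true;
      }
      return false;
    }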
@@ -269,27 +281,27 @@
   CpuFeatures::Scope scope(VFP2);
   if (!dst.is(src)) {
     vmov(dst, src);
   }
 }
 
 
 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                          Condition cond) {
   if (!src2.is_reg() &&
-      !src2.must_use_constant_pool() &&
+      !src2.must_use_constant_pool(this) &&
       src2.immediate() == 0) {
     mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
 
-  } else if (!src2.is_single_instruction() &&
-             !src2.must_use_constant_pool() &&
+  } else if (!src2.is_single_instruction(this) &&
+             !src2.must_use_constant_pool(this) &&
              CpuFeatures::IsSupported(ARMv7) &&
              IsPowerOf2(src2.immediate() + 1)) {
     ubfx(dst, src1, 0,
          WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
 
   } else {
     and_(dst, src1, src2, LeaveCC, cond);
   }
 }
 
 
@@ -296,15 +308,15 @@
 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                           Condition cond) {
   ASSERT(lsb < 32);
-  if (!CpuFeatures::IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     and_(dst, src1, Operand(mask), LeaveCC, cond);
     if (lsb != 0) {
       mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
     }
   } else {
     ubfx(dst, src1, lsb, width, cond);
   }
 }
 
 
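Note: with the predictable_code_size() check, the two-instruction fallback (and plus LSR) is used even when ARMv7 is available, so Ubfx always emits the same sequence. The fallback matches ubfx bit for bit (sketch, not V8 code; assumes lsb + width < 32 so the shifts stay in range):

    #include <cstdint>

    uint32_t UbfxReference(uint32_t x, int lsb, int width) {
      return (x >> lsb) & ((1u << width) - 1);  // what ubfx computes
    }

    uint32_t UbfxFallback(uint32_t x, int lsb, int width) {
      uint32_t mask = (1u << (width + lsb)) - 1 - ((1u << lsb) - 1);
      x &= mask;        // and_(dst, src1, Operand(mask), LeaveCC, cond)
      return x >> lsb;  // mov(dst, Operand(dst, LSR, lsb)) when lsb != 0
    }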
@@ -311,20 +323,20 @@
 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                           Condition cond) {
   ASSERT(lsb < 32);
-  if (!CpuFeatures::IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     and_(dst, src1, Operand(mask), LeaveCC, cond);
     int shift_up = 32 - lsb - width;
     int shift_down = lsb + shift_up;
     if (shift_up != 0) {
       mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
     }
     if (shift_down != 0) {
       mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
     }
   } else {
     sbfx(dst, src1, lsb, width, cond);
   }
 }
 
 
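Note: the signed variant keeps the same shape: mask out the field, shift it left so its top bit lands in bit 31, then arithmetic-shift right to sign-extend (sketch, not V8 code; same lsb + width < 32 assumption, and ">>" on a negative int is taken to be arithmetic, as it is on ARM):

    #include <cstdint>

    int32_t SbfxFallback(uint32_t x, int lsb, int width) {
      uint32_t mask = (1u << (width + lsb)) - 1 - ((1u << lsb) - 1);
      uint32_t field = x & mask;                  // and_
      int shift_up = 32 - lsb - width;            // top of field -> bit 31
      int shift_down = lsb + shift_up;            // == 32 - width
      int32_t widened = static_cast<int32_t>(field << shift_up);  // LSL
      return widened >> shift_down;               // ASR sign-extends
    }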
@@ -331,23 +343,23 @@
 void MacroAssembler::Bfi(Register dst,
                          Register src,
                          Register scratch,
                          int lsb,
                          int width,
                          Condition cond) {
   ASSERT(0 <= lsb && lsb < 32);
   ASSERT(0 <= width && width < 32);
   ASSERT(lsb + width < 32);
   ASSERT(!scratch.is(dst));
   if (width == 0) return;
-  if (!CpuFeatures::IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     bic(dst, dst, Operand(mask));
     and_(scratch, src, Operand((1 << width) - 1));
     mov(scratch, Operand(scratch, LSL, lsb));
     orr(dst, dst, scratch);
   } else {
     bfi(dst, src, lsb, width, cond);
   }
 }
 
 
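Note: the bfi fallback clears the destination field with bic, masks and shifts the source into a scratch register, then merges with orr: four fixed-length instructions in place of one bfi (sketch, not V8 code):

    #include <cstdint>

    uint32_t BfiFallback(uint32_t dst, uint32_t src, int lsb, int width) {
      uint32_t mask = (1u << (width + lsb)) - 1 - ((1u << lsb) - 1);
      dst &= ~mask;                                  // bic(dst, dst, mask)
      uint32_t scratch = src & ((1u << width) - 1);  // and_(scratch, src, ...)
      scratch <<= lsb;                               // mov(scratch, LSL lsb)
      return dst | scratch;                          // orr(dst, dst, scratch)
    }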
@@ -354,11 +366,11 @@
 void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
   ASSERT(lsb < 32);
-  if (!CpuFeatures::IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     bic(dst, dst, Operand(mask));
   } else {
     bfc(dst, lsb, width, cond);
   }
 }
 
 
@@ -365,13 +377,13 @@
 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                           Condition cond) {
-  if (!CpuFeatures::IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     ASSERT(!dst.is(pc) && !src.rm().is(pc));
     ASSERT((satpos >= 0) && (satpos <= 31));
 
     // These asserts are required to ensure compatibility with the ARMv7
     // implementation.
     ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
     ASSERT(src.rs().is(no_reg));
 
     Label done;
     int satval = (1 << satpos) - 1;
(...skipping 287 matching lines...)
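
Note: usat saturates a value to the unsigned range [0, 2^satpos - 1]; the fallback body (elided above) starts from satval and branches to clamp. Reference semantics (sketch, not V8 code; satval computed unsigned to keep the shift well defined for satpos == 31):

    #include <cstdint>

    int32_t UsatReference(int32_t value, int satpos) {
      int32_t satval =
          static_cast<int32_t>((1u << satpos) - 1);  // as computed above
      if (value < 0) return 0;            // clamp below
      if (value > satval) return satval;  // clamp above
      return value;
    }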
@@ -665,21 +677,21 @@
   ASSERT(src.rm().is(no_reg));
   ASSERT(!dst1.is(lr));  // r14.
   ASSERT_EQ(0, dst1.code() % 2);
   ASSERT_EQ(dst1.code() + 1, dst2.code());
 
   // V8 does not use this addressing mode, so the fallback code
   // below doesn't support it yet.
   ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
 
   // Generate two ldr instructions if ldrd is not available.
-  if (CpuFeatures::IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
     CpuFeatures::Scope scope(ARMv7);
     ldrd(dst1, dst2, src, cond);
   } else {
     if ((src.am() == Offset) || (src.am() == NegOffset)) {
       MemOperand src2(src);
       src2.set_offset(src2.offset() + 4);
       if (dst1.is(src.rn())) {
         ldr(dst2, src2, cond);
         ldr(dst1, src, cond);
       } else {
(...skipping 21 matching lines...)
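
Note: when dst1 aliases the base register src.rn(), the fallback loads dst2 first so the base is still intact for the second ldr; dst2 can never alias the base in that case because dst2 == dst1 + 1. Skipping ldrd under predictable_code_size() keeps the offset-addressing sequence at a fixed two instructions, independent of CPU features.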
@@ -707,21 +719,21 @@
   ASSERT(dst.rm().is(no_reg));
   ASSERT(!src1.is(lr));  // r14.
   ASSERT_EQ(0, src1.code() % 2);
   ASSERT_EQ(src1.code() + 1, src2.code());
 
   // V8 does not use this addressing mode, so the fallback code
   // below doesn't support it yet.
   ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
 
   // Generate two str instructions if strd is not available.
-  if (CpuFeatures::IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
     CpuFeatures::Scope scope(ARMv7);
     strd(src1, src2, dst, cond);
   } else {
     MemOperand dst2(dst);
     if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
       dst2.set_offset(dst2.offset() + 4);
       str(src1, dst, cond);
       str(src2, dst2, cond);
     } else {  // PostIndex or NegPostIndex.
       ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
(...skipping 1851 matching lines...)
@@ -2579,21 +2591,21 @@
                                  input_high,
                                  input_low,
                                  scratch);
   bind(&done);
 }
 
 
 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                          Register src,
                                          int num_least_bits) {
-  if (CpuFeatures::IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
     ubfx(dst, src, kSmiTagSize, num_least_bits);
   } else {
     mov(dst, Operand(src, ASR, kSmiTagSize));
     and_(dst, dst, Operand((1 << num_least_bits) - 1));
   }
 }
 
 
 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                            Register src,
(...skipping 1205 matching lines...)
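
Note: a Smi on 32-bit V8 carries its integer payload shifted left by kSmiTagSize (1), so taking its least bits is a bit-field extract starting at bit 1; under predictable_code_size() the shift-and-mask form is always chosen. Equivalent arithmetic (sketch, not V8 code; the real fallback uses ASR since Smis are signed, but the masked low bits come out the same):

    #include <cstdint>

    const int kSmiTagSizeSketch = 1;  // illustrative stand-in for kSmiTagSize

    uint32_t GetLeastBitsFromSmi(uint32_t smi, int num_least_bits) {
      uint32_t value = smi >> kSmiTagSizeSketch;    // ASR in the fallback
      return value & ((1u << num_least_bits) - 1);  // and_ with the mask
    }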
@@ -3805,10 +3817,10 @@
 void CodePatcher::EmitCondition(Condition cond) {
   Instr instr = Assembler::instr_at(masm_.pc_);
   instr = (instr & ~kCondMask) | cond;
   masm_.emit(instr);
 }
 
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM