| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 621 matching lines...) | |
| 632 } | 632 } |
| 633 } | 633 } |
| 634 Translation translation(&translations_, frame_count, jsframe_count, | 634 Translation translation(&translations_, frame_count, jsframe_count, |
| 635 zone()); | 635 zone()); |
| 636 WriteTranslation(environment, &translation); | 636 WriteTranslation(environment, &translation); |
| 637 int deoptimization_index = deoptimizations_.length(); | 637 int deoptimization_index = deoptimizations_.length(); |
| 638 int pc_offset = masm()->pc_offset(); | 638 int pc_offset = masm()->pc_offset(); |
| 639 environment->Register(deoptimization_index, | 639 environment->Register(deoptimization_index, |
| 640 translation.index(), | 640 translation.index(), |
| 641 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 641 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 642 deoptimizations_.Add(environment); | 642 deoptimizations_.Add(environment, zone()); |
| 643 } | 643 } |
| 644 } | 644 } |
| 645 | 645 |
| 646 | 646 |
| 647 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { | 647 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { |
| 648 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 648 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 649 ASSERT(environment->HasBeenRegistered()); | 649 ASSERT(environment->HasBeenRegistered()); |
| 650 int id = environment->deoptimization_index(); | 650 int id = environment->deoptimization_index(); |
| 651 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); | 651 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); |
| 652 if (entry == NULL) { | 652 if (entry == NULL) { |
| (...skipping 11 matching lines...) | |
| 664 | 664 |
| 665 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc); | 665 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc); |
| 666 | 666 |
| 667 if (cc == al) { | 667 if (cc == al) { |
| 668 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 668 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |
| 669 } else { | 669 } else { |
| 670 // We often have several deopts to the same entry, reuse the last | 670 // We often have several deopts to the same entry, reuse the last |
| 671 // jump entry if this is the case. | 671 // jump entry if this is the case. |
| 672 if (deopt_jump_table_.is_empty() || | 672 if (deopt_jump_table_.is_empty() || |
| 673 (deopt_jump_table_.last().address != entry)) { | 673 (deopt_jump_table_.last().address != entry)) { |
| 674 deopt_jump_table_.Add(JumpTableEntry(entry)); | 674 deopt_jump_table_.Add(JumpTableEntry(entry), zone()); |
| 675 } | 675 } |
| 676 __ b(cc, &deopt_jump_table_.last().label); | 676 __ b(cc, &deopt_jump_table_.last().label); |
| 677 } | 677 } |
| 678 } | 678 } |
| 679 | 679 |
| 680 | 680 |
| 681 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 681 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
| 682 int length = deoptimizations_.length(); | 682 int length = deoptimizations_.length(); |
| 683 if (length == 0) return; | 683 if (length == 0) return; |
| 684 Handle<DeoptimizationInputData> data = | 684 Handle<DeoptimizationInputData> data = |
| (...skipping 24 matching lines...) | |
| 709 } | 709 } |
| 710 code->set_deoptimization_data(*data); | 710 code->set_deoptimization_data(*data); |
| 711 } | 711 } |
| 712 | 712 |
| 713 | 713 |
| 714 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) { | 714 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) { |
| 715 int result = deoptimization_literals_.length(); | 715 int result = deoptimization_literals_.length(); |
| 716 for (int i = 0; i < deoptimization_literals_.length(); ++i) { | 716 for (int i = 0; i < deoptimization_literals_.length(); ++i) { |
| 717 if (deoptimization_literals_[i].is_identical_to(literal)) return i; | 717 if (deoptimization_literals_[i].is_identical_to(literal)) return i; |
| 718 } | 718 } |
| 719 deoptimization_literals_.Add(literal); | 719 deoptimization_literals_.Add(literal, zone()); |
| 720 return result; | 720 return result; |
| 721 } | 721 } |
| 722 | 722 |
| 723 | 723 |
| 724 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { | 724 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { |
| 725 ASSERT(deoptimization_literals_.length() == 0); | 725 ASSERT(deoptimization_literals_.length() == 0); |
| 726 | 726 |
| 727 const ZoneList<Handle<JSFunction> >* inlined_closures = | 727 const ZoneList<Handle<JSFunction> >* inlined_closures = |
| 728 chunk()->inlined_closures(); | 728 chunk()->inlined_closures(); |
| 729 | 729 |
| (...skipping 27 matching lines...) | |
| 757 ASSERT(expected_safepoint_kind_ == kind); | 757 ASSERT(expected_safepoint_kind_ == kind); |
| 758 | 758 |
| 759 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); | 759 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); |
| 760 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), | 760 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), |
| 761 kind, arguments, deopt_mode); | 761 kind, arguments, deopt_mode); |
| 762 for (int i = 0; i < operands->length(); i++) { | 762 for (int i = 0; i < operands->length(); i++) { |
| 763 LOperand* pointer = operands->at(i); | 763 LOperand* pointer = operands->at(i); |
| 764 if (pointer->IsStackSlot()) { | 764 if (pointer->IsStackSlot()) { |
| 765 safepoint.DefinePointerSlot(pointer->index(), zone()); | 765 safepoint.DefinePointerSlot(pointer->index(), zone()); |
| 766 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { | 766 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { |
| 767 safepoint.DefinePointerRegister(ToRegister(pointer)); | 767 safepoint.DefinePointerRegister(ToRegister(pointer), zone()); |
| 768 } | 768 } |
| 769 } | 769 } |
| 770 if (kind & Safepoint::kWithRegisters) { | 770 if (kind & Safepoint::kWithRegisters) { |
| 771 // Register cp always contains a pointer to the context. | 771 // Register cp always contains a pointer to the context. |
| 772 safepoint.DefinePointerRegister(cp); | 772 safepoint.DefinePointerRegister(cp, zone()); |
| 773 } | 773 } |
| 774 } | 774 } |
| 775 | 775 |
| 776 | 776 |
| 777 void LCodeGen::RecordSafepoint(LPointerMap* pointers, | 777 void LCodeGen::RecordSafepoint(LPointerMap* pointers, |
| 778 Safepoint::DeoptMode deopt_mode) { | 778 Safepoint::DeoptMode deopt_mode) { |
| 779 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); | 779 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); |
| 780 } | 780 } |
| 781 | 781 |
| 782 | 782 |
| 783 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { | 783 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { |
| 784 LPointerMap empty_pointers(RelocInfo::kNoPosition); | 784 LPointerMap empty_pointers(RelocInfo::kNoPosition, zone()); |
| 785 RecordSafepoint(&empty_pointers, deopt_mode); | 785 RecordSafepoint(&empty_pointers, deopt_mode); |
| 786 } | 786 } |
| 787 | 787 |
| 788 | 788 |
| 789 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, | 789 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, |
| 790 int arguments, | 790 int arguments, |
| 791 Safepoint::DeoptMode deopt_mode) { | 791 Safepoint::DeoptMode deopt_mode) { |
| 792 RecordSafepoint( | 792 RecordSafepoint( |
| 793 pointers, Safepoint::kWithRegisters, arguments, deopt_mode); | 793 pointers, Safepoint::kWithRegisters, arguments, deopt_mode); |
| 794 } | 794 } |
| (...skipping 389 matching lines...) | |
| 1184 __ mov(result, Operand(left, ASR, 1), LeaveCC, eq); | 1184 __ mov(result, Operand(left, ASR, 1), LeaveCC, eq); |
| 1185 __ b(eq, &done); | 1185 __ b(eq, &done); |
| 1186 | 1186 |
| 1187 __ cmp(right, Operand(4)); | 1187 __ cmp(right, Operand(4)); |
| 1188 __ tst(left, Operand(3), eq); | 1188 __ tst(left, Operand(3), eq); |
| 1189 __ mov(result, Operand(left, ASR, 2), LeaveCC, eq); | 1189 __ mov(result, Operand(left, ASR, 2), LeaveCC, eq); |
| 1190 __ b(eq, &done); | 1190 __ b(eq, &done); |
| 1191 | 1191 |
| 1192 // Call the stub. The numbers in r0 and r1 have | 1192 // Call the stub. The numbers in r0 and r1 have |
| 1193 // to be tagged to Smis. If that is not possible, deoptimize. | 1193 // to be tagged to Smis. If that is not possible, deoptimize. |
| 1194 DeferredDivI* deferred = new DeferredDivI(this, instr); | 1194 DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr); |
| 1195 | 1195 |
| 1196 __ TrySmiTag(left, &deoptimize, scratch); | 1196 __ TrySmiTag(left, &deoptimize, scratch); |
| 1197 __ TrySmiTag(right, &deoptimize, scratch); | 1197 __ TrySmiTag(right, &deoptimize, scratch); |
| 1198 | 1198 |
| 1199 __ b(al, deferred->entry()); | 1199 __ b(al, deferred->entry()); |
| 1200 __ bind(deferred->exit()); | 1200 __ bind(deferred->exit()); |
| 1201 | 1201 |
| 1202 // If the result in r0 is a Smi, untag it, else deoptimize. | 1202 // If the result in r0 is a Smi, untag it, else deoptimize. |
| 1203 __ JumpIfNotSmi(result, &deoptimize); | 1203 __ JumpIfNotSmi(result, &deoptimize); |
| 1204 __ SmiUntag(result); | 1204 __ SmiUntag(result); |
| (...skipping 1085 matching lines...) | |
| 2290 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); | 2290 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); |
| 2291 } | 2291 } |
| 2292 virtual LInstruction* instr() { return instr_; } | 2292 virtual LInstruction* instr() { return instr_; } |
| 2293 Label* map_check() { return &map_check_; } | 2293 Label* map_check() { return &map_check_; } |
| 2294 private: | 2294 private: |
| 2295 LInstanceOfKnownGlobal* instr_; | 2295 LInstanceOfKnownGlobal* instr_; |
| 2296 Label map_check_; | 2296 Label map_check_; |
| 2297 }; | 2297 }; |
| 2298 | 2298 |
| 2299 DeferredInstanceOfKnownGlobal* deferred; | 2299 DeferredInstanceOfKnownGlobal* deferred; |
| 2300 deferred = new DeferredInstanceOfKnownGlobal(this, instr); | 2300 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); |
| 2301 | 2301 |
| 2302 Label done, false_result; | 2302 Label done, false_result; |
| 2303 Register object = ToRegister(instr->InputAt(0)); | 2303 Register object = ToRegister(instr->InputAt(0)); |
| 2304 Register temp = ToRegister(instr->TempAt(0)); | 2304 Register temp = ToRegister(instr->TempAt(0)); |
| 2305 Register result = ToRegister(instr->result()); | 2305 Register result = ToRegister(instr->result()); |
| 2306 | 2306 |
| 2307 ASSERT(object.is(r0)); | 2307 ASSERT(object.is(r0)); |
| 2308 ASSERT(result.is(r0)); | 2308 ASSERT(result.is(r0)); |
| 2309 | 2309 |
| 2310 // A Smi is not instance of anything. | 2310 // A Smi is not instance of anything. |
| (...skipping 940 matching lines...) | |
| 3251 Representation r = instr->hydrogen()->value()->representation(); | 3251 Representation r = instr->hydrogen()->value()->representation(); |
| 3252 if (r.IsDouble()) { | 3252 if (r.IsDouble()) { |
| 3253 DwVfpRegister input = ToDoubleRegister(instr->InputAt(0)); | 3253 DwVfpRegister input = ToDoubleRegister(instr->InputAt(0)); |
| 3254 DwVfpRegister result = ToDoubleRegister(instr->result()); | 3254 DwVfpRegister result = ToDoubleRegister(instr->result()); |
| 3255 __ vabs(result, input); | 3255 __ vabs(result, input); |
| 3256 } else if (r.IsInteger32()) { | 3256 } else if (r.IsInteger32()) { |
| 3257 EmitIntegerMathAbs(instr); | 3257 EmitIntegerMathAbs(instr); |
| 3258 } else { | 3258 } else { |
| 3259 // Representation is tagged. | 3259 // Representation is tagged. |
| 3260 DeferredMathAbsTaggedHeapNumber* deferred = | 3260 DeferredMathAbsTaggedHeapNumber* deferred = |
| 3261 new DeferredMathAbsTaggedHeapNumber(this, instr); | 3261 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); |
| 3262 Register input = ToRegister(instr->InputAt(0)); | 3262 Register input = ToRegister(instr->InputAt(0)); |
| 3263 // Smi check. | 3263 // Smi check. |
| 3264 __ JumpIfNotSmi(input, deferred->entry()); | 3264 __ JumpIfNotSmi(input, deferred->entry()); |
| 3265 // If smi, handle it directly. | 3265 // If smi, handle it directly. |
| 3266 EmitIntegerMathAbs(instr); | 3266 EmitIntegerMathAbs(instr); |
| 3267 __ bind(deferred->exit()); | 3267 __ bind(deferred->exit()); |
| 3268 } | 3268 } |
| 3269 } | 3269 } |
| 3270 | 3270 |
| 3271 | 3271 |
| (...skipping 156 matching lines...) | |
| 3428 class DeferredDoRandom: public LDeferredCode { | 3428 class DeferredDoRandom: public LDeferredCode { |
| 3429 public: | 3429 public: |
| 3430 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) | 3430 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) |
| 3431 : LDeferredCode(codegen), instr_(instr) { } | 3431 : LDeferredCode(codegen), instr_(instr) { } |
| 3432 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } | 3432 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } |
| 3433 virtual LInstruction* instr() { return instr_; } | 3433 virtual LInstruction* instr() { return instr_; } |
| 3434 private: | 3434 private: |
| 3435 LRandom* instr_; | 3435 LRandom* instr_; |
| 3436 }; | 3436 }; |
| 3437 | 3437 |
| 3438 DeferredDoRandom* deferred = new DeferredDoRandom(this, instr); | 3438 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); |
| 3439 | 3439 |
| 3440 // Having marked this instruction as a call we can use any | 3440 // Having marked this instruction as a call we can use any |
| 3441 // registers. | 3441 // registers. |
| 3442 ASSERT(ToDoubleRegister(instr->result()).is(d7)); | 3442 ASSERT(ToDoubleRegister(instr->result()).is(d7)); |
| 3443 ASSERT(ToRegister(instr->InputAt(0)).is(r0)); | 3443 ASSERT(ToRegister(instr->InputAt(0)).is(r0)); |
| 3444 | 3444 |
| 3445 static const int kSeedSize = sizeof(uint32_t); | 3445 static const int kSeedSize = sizeof(uint32_t); |
| 3446 STATIC_ASSERT(kPointerSize == kSeedSize); | 3446 STATIC_ASSERT(kPointerSize == kSeedSize); |
| 3447 | 3447 |
| 3448 __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset)); | 3448 __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset)); |
| (...skipping 524 matching lines...) | |
| 3973 public: | 3973 public: |
| 3974 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) | 3974 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) |
| 3975 : LDeferredCode(codegen), instr_(instr) { } | 3975 : LDeferredCode(codegen), instr_(instr) { } |
| 3976 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } | 3976 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } |
| 3977 virtual LInstruction* instr() { return instr_; } | 3977 virtual LInstruction* instr() { return instr_; } |
| 3978 private: | 3978 private: |
| 3979 LStringCharCodeAt* instr_; | 3979 LStringCharCodeAt* instr_; |
| 3980 }; | 3980 }; |
| 3981 | 3981 |
| 3982 DeferredStringCharCodeAt* deferred = | 3982 DeferredStringCharCodeAt* deferred = |
| 3983 new DeferredStringCharCodeAt(this, instr); | 3983 new(zone()) DeferredStringCharCodeAt(this, instr); |
| 3984 | 3984 |
| 3985 StringCharLoadGenerator::Generate(masm(), | 3985 StringCharLoadGenerator::Generate(masm(), |
| 3986 ToRegister(instr->string()), | 3986 ToRegister(instr->string()), |
| 3987 ToRegister(instr->index()), | 3987 ToRegister(instr->index()), |
| 3988 ToRegister(instr->result()), | 3988 ToRegister(instr->result()), |
| 3989 deferred->entry()); | 3989 deferred->entry()); |
| 3990 __ bind(deferred->exit()); | 3990 __ bind(deferred->exit()); |
| 3991 } | 3991 } |
| 3992 | 3992 |
| 3993 | 3993 |
| (...skipping 34 matching lines...) | |
| 4028 public: | 4028 public: |
| 4029 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) | 4029 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) |
| 4030 : LDeferredCode(codegen), instr_(instr) { } | 4030 : LDeferredCode(codegen), instr_(instr) { } |
| 4031 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } | 4031 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } |
| 4032 virtual LInstruction* instr() { return instr_; } | 4032 virtual LInstruction* instr() { return instr_; } |
| 4033 private: | 4033 private: |
| 4034 LStringCharFromCode* instr_; | 4034 LStringCharFromCode* instr_; |
| 4035 }; | 4035 }; |
| 4036 | 4036 |
| 4037 DeferredStringCharFromCode* deferred = | 4037 DeferredStringCharFromCode* deferred = |
| 4038 new DeferredStringCharFromCode(this, instr); | 4038 new(zone()) DeferredStringCharFromCode(this, instr); |
| 4039 | 4039 |
| 4040 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); | 4040 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); |
| 4041 Register char_code = ToRegister(instr->char_code()); | 4041 Register char_code = ToRegister(instr->char_code()); |
| 4042 Register result = ToRegister(instr->result()); | 4042 Register result = ToRegister(instr->result()); |
| 4043 ASSERT(!char_code.is(result)); | 4043 ASSERT(!char_code.is(result)); |
| 4044 | 4044 |
| 4045 __ cmp(char_code, Operand(String::kMaxAsciiCharCode)); | 4045 __ cmp(char_code, Operand(String::kMaxAsciiCharCode)); |
| 4046 __ b(hi, deferred->entry()); | 4046 __ b(hi, deferred->entry()); |
| 4047 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); | 4047 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); |
| 4048 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); | 4048 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); |
| (...skipping 53 matching lines...) | |
| 4102 : LDeferredCode(codegen), instr_(instr) { } | 4102 : LDeferredCode(codegen), instr_(instr) { } |
| 4103 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } | 4103 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } |
| 4104 virtual LInstruction* instr() { return instr_; } | 4104 virtual LInstruction* instr() { return instr_; } |
| 4105 private: | 4105 private: |
| 4106 LNumberTagI* instr_; | 4106 LNumberTagI* instr_; |
| 4107 }; | 4107 }; |
| 4108 | 4108 |
| 4109 Register src = ToRegister(instr->InputAt(0)); | 4109 Register src = ToRegister(instr->InputAt(0)); |
| 4110 Register dst = ToRegister(instr->result()); | 4110 Register dst = ToRegister(instr->result()); |
| 4111 | 4111 |
| 4112 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr); | 4112 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); |
| 4113 __ SmiTag(dst, src, SetCC); | 4113 __ SmiTag(dst, src, SetCC); |
| 4114 __ b(vs, deferred->entry()); | 4114 __ b(vs, deferred->entry()); |
| 4115 __ bind(deferred->exit()); | 4115 __ bind(deferred->exit()); |
| 4116 } | 4116 } |
| 4117 | 4117 |
| 4118 | 4118 |
| 4119 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { | 4119 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { |
| 4120 Label slow; | 4120 Label slow; |
| 4121 Register src = ToRegister(instr->InputAt(0)); | 4121 Register src = ToRegister(instr->InputAt(0)); |
| 4122 Register dst = ToRegister(instr->result()); | 4122 Register dst = ToRegister(instr->result()); |
| (...skipping 50 matching lines...) | |
| 4173 private: | 4173 private: |
| 4174 LNumberTagD* instr_; | 4174 LNumberTagD* instr_; |
| 4175 }; | 4175 }; |
| 4176 | 4176 |
| 4177 DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0)); | 4177 DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0)); |
| 4178 Register scratch = scratch0(); | 4178 Register scratch = scratch0(); |
| 4179 Register reg = ToRegister(instr->result()); | 4179 Register reg = ToRegister(instr->result()); |
| 4180 Register temp1 = ToRegister(instr->TempAt(0)); | 4180 Register temp1 = ToRegister(instr->TempAt(0)); |
| 4181 Register temp2 = ToRegister(instr->TempAt(1)); | 4181 Register temp2 = ToRegister(instr->TempAt(1)); |
| 4182 | 4182 |
| 4183 DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr); | 4183 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
| 4184 if (FLAG_inline_new) { | 4184 if (FLAG_inline_new) { |
| 4185 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); | 4185 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); |
| 4186 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); | 4186 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); |
| 4187 } else { | 4187 } else { |
| 4188 __ jmp(deferred->entry()); | 4188 __ jmp(deferred->entry()); |
| 4189 } | 4189 } |
| 4190 __ bind(deferred->exit()); | 4190 __ bind(deferred->exit()); |
| 4191 __ sub(ip, reg, Operand(kHeapObjectTag)); | 4191 __ sub(ip, reg, Operand(kHeapObjectTag)); |
| 4192 __ vstr(input_reg, ip, HeapNumber::kValueOffset); | 4192 __ vstr(input_reg, ip, HeapNumber::kValueOffset); |
| 4193 } | 4193 } |
| (...skipping 181 matching lines...) | |
| 4375 private: | 4375 private: |
| 4376 LTaggedToI* instr_; | 4376 LTaggedToI* instr_; |
| 4377 }; | 4377 }; |
| 4378 | 4378 |
| 4379 LOperand* input = instr->InputAt(0); | 4379 LOperand* input = instr->InputAt(0); |
| 4380 ASSERT(input->IsRegister()); | 4380 ASSERT(input->IsRegister()); |
| 4381 ASSERT(input->Equals(instr->result())); | 4381 ASSERT(input->Equals(instr->result())); |
| 4382 | 4382 |
| 4383 Register input_reg = ToRegister(input); | 4383 Register input_reg = ToRegister(input); |
| 4384 | 4384 |
| 4385 DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr); | 4385 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); |
| 4386 | 4386 |
| 4387 // Optimistically untag the input. | 4387 // Optimistically untag the input. |
| 4388 // If the input is a HeapObject, SmiUntag will set the carry flag. | 4388 // If the input is a HeapObject, SmiUntag will set the carry flag. |
| 4389 __ SmiUntag(input_reg, SetCC); | 4389 __ SmiUntag(input_reg, SetCC); |
| 4390 // Branch to deferred code if the input was tagged. | 4390 // Branch to deferred code if the input was tagged. |
| 4391 // The deferred code will take care of restoring the tag. | 4391 // The deferred code will take care of restoring the tag. |
| 4392 __ b(cs, deferred->entry()); | 4392 __ b(cs, deferred->entry()); |
| 4393 __ bind(deferred->exit()); | 4393 __ bind(deferred->exit()); |
| 4394 } | 4394 } |
| 4395 | 4395 |
| (...skipping 239 matching lines...) | |
| 4635 class DeferredAllocateObject: public LDeferredCode { | 4635 class DeferredAllocateObject: public LDeferredCode { |
| 4636 public: | 4636 public: |
| 4637 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr) | 4637 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr) |
| 4638 : LDeferredCode(codegen), instr_(instr) { } | 4638 : LDeferredCode(codegen), instr_(instr) { } |
| 4639 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); } | 4639 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); } |
| 4640 virtual LInstruction* instr() { return instr_; } | 4640 virtual LInstruction* instr() { return instr_; } |
| 4641 private: | 4641 private: |
| 4642 LAllocateObject* instr_; | 4642 LAllocateObject* instr_; |
| 4643 }; | 4643 }; |
| 4644 | 4644 |
| 4645 DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr); | 4645 DeferredAllocateObject* deferred = |
| | 4646 new(zone()) DeferredAllocateObject(this, instr); |
| 4646 | 4647 |
| 4647 Register result = ToRegister(instr->result()); | 4648 Register result = ToRegister(instr->result()); |
| 4648 Register scratch = ToRegister(instr->TempAt(0)); | 4649 Register scratch = ToRegister(instr->TempAt(0)); |
| 4649 Register scratch2 = ToRegister(instr->TempAt(1)); | 4650 Register scratch2 = ToRegister(instr->TempAt(1)); |
| 4650 Handle<JSFunction> constructor = instr->hydrogen()->constructor(); | 4651 Handle<JSFunction> constructor = instr->hydrogen()->constructor(); |
| 4651 Handle<Map> initial_map(constructor->initial_map()); | 4652 Handle<Map> initial_map(constructor->initial_map()); |
| 4652 int instance_size = initial_map->instance_size(); | 4653 int instance_size = initial_map->instance_size(); |
| 4653 ASSERT(initial_map->pre_allocated_property_fields() + | 4654 ASSERT(initial_map->pre_allocated_property_fields() + |
| 4654 initial_map->unused_property_fields() - | 4655 initial_map->unused_property_fields() - |
| 4655 initial_map->inobject_properties() == 0); | 4656 initial_map->inobject_properties() == 0); |
| (...skipping 583 matching lines...) | |
| 5239 StackCheckStub stub; | 5240 StackCheckStub stub; |
| 5240 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 5241 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 5241 EnsureSpaceForLazyDeopt(); | 5242 EnsureSpaceForLazyDeopt(); |
| 5242 __ bind(&done); | 5243 __ bind(&done); |
| 5243 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 5244 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
| 5244 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 5245 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
| 5245 } else { | 5246 } else { |
| 5246 ASSERT(instr->hydrogen()->is_backwards_branch()); | 5247 ASSERT(instr->hydrogen()->is_backwards_branch()); |
| 5247 // Perform stack overflow check if this goto needs it before jumping. | 5248 // Perform stack overflow check if this goto needs it before jumping. |
| 5248 DeferredStackCheck* deferred_stack_check = | 5249 DeferredStackCheck* deferred_stack_check = |
| 5249 new DeferredStackCheck(this, instr); | 5250 new(zone()) DeferredStackCheck(this, instr); |
| 5250 __ LoadRoot(ip, Heap::kStackLimitRootIndex); | 5251 __ LoadRoot(ip, Heap::kStackLimitRootIndex); |
| 5251 __ cmp(sp, Operand(ip)); | 5252 __ cmp(sp, Operand(ip)); |
| 5252 __ b(lo, deferred_stack_check->entry()); | 5253 __ b(lo, deferred_stack_check->entry()); |
| 5253 EnsureSpaceForLazyDeopt(); | 5254 EnsureSpaceForLazyDeopt(); |
| 5254 __ bind(instr->done_label()); | 5255 __ bind(instr->done_label()); |
| 5255 deferred_stack_check->SetExit(instr->done_label()); | 5256 deferred_stack_check->SetExit(instr->done_label()); |
| 5256 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 5257 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
| 5257 // Don't record a deoptimization index for the safepoint here. | 5258 // Don't record a deoptimization index for the safepoint here. |
| 5258 // This will be done explicitly when emitting call and the safepoint in | 5259 // This will be done explicitly when emitting call and the safepoint in |
| 5259 // the deferred code. | 5260 // the deferred code. |
| (...skipping 98 matching lines...) | |
| 5358 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); | 5359 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); |
| 5359 __ ldr(result, FieldMemOperand(scratch, | 5360 __ ldr(result, FieldMemOperand(scratch, |
| 5360 FixedArray::kHeaderSize - kPointerSize)); | 5361 FixedArray::kHeaderSize - kPointerSize)); |
| 5361 __ bind(&done); | 5362 __ bind(&done); |
| 5362 } | 5363 } |
| 5363 | 5364 |
| 5364 | 5365 |
| 5365 #undef __ | 5366 #undef __ |
| 5366 | 5367 |
| 5367 } } // namespace v8::internal | 5368 } } // namespace v8::internal |
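
The pattern running through this diff is that allocations which previously went to the global heap now thread the current compilation zone explicitly: deferred-code objects are created with `new(zone()) Foo(...)` and zone-backed lists take the zone on `Add(element, zone())`. The sketch below is a minimal, hypothetical illustration of that pattern under simplified assumptions — `Zone`, `ZoneObject`, `ZoneList`, and `DeferredThing` here are stand-ins, not V8's real classes or signatures.

```cpp
// Minimal sketch of zone (arena) allocation: nothing is freed individually;
// the whole arena is released when the Zone is destroyed, which is why both
// placement new and Add() must be handed the zone explicitly.
#include <cstddef>
#include <cstdlib>
#include <vector>

class Zone {
 public:
  ~Zone() { for (void* p : blocks_) std::free(p); }
  void* New(std::size_t size) {
    void* p = std::malloc(size);  // a real zone would carve from large segments
    blocks_.push_back(p);
    return p;
  }
 private:
  std::vector<void*> blocks_;
};

// Base class for objects owned by a zone; destructors are never run
// individually, the zone just drops the memory in bulk.
class ZoneObject {
 public:
  void* operator new(std::size_t size, Zone* zone) { return zone->New(size); }
  void operator delete(void*, Zone*) {}  // matching placement delete
};

// Growable list whose backing store is zone-allocated; growth never touches
// the global heap, so Add() takes the zone as an explicit argument.
template <typename T>
class ZoneList {
 public:
  void Add(const T& element, Zone* zone) {
    if (length_ == capacity_) Grow(zone);
    data_[length_++] = element;
  }
  const T& at(int i) const { return data_[i]; }

 private:
  void Grow(Zone* zone) {
    int new_capacity = capacity_ == 0 ? 4 : capacity_ * 2;
    T* new_data = static_cast<T*>(zone->New(new_capacity * sizeof(T)));
    for (int i = 0; i < length_; ++i) new_data[i] = data_[i];
    data_ = new_data;
    capacity_ = new_capacity;
  }
  T* data_ = nullptr;
  int length_ = 0;
  int capacity_ = 0;
};

// Hypothetical deferred-code stand-in, used only to show the call shape.
struct DeferredThing : public ZoneObject {
  explicit DeferredThing(int id) : id(id) {}
  int id;
};

int main() {
  Zone zone;
  ZoneList<DeferredThing*> deferred;
  DeferredThing* d = new (&zone) DeferredThing(1);  // cf. new(zone()) Foo(...)
  deferred.Add(d, &zone);                           // cf. list.Add(x, zone())
  return deferred.at(0)->id == 1 ? 0 : 1;
}
```

The design point the patch reflects: because zone memory is reclaimed all at once when compilation finishes, every container and placement-new site that lives in the zone has to receive the zone explicitly rather than defaulting to a global allocator.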