| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 59 matching lines...) |
| 70 Address prev_call_address = NULL; | 70 Address prev_call_address = NULL; |
| 71 #endif | 71 #endif |
| 72 for (int i = 0; i < deopt_data->DeoptCount(); i++) { | 72 for (int i = 0; i < deopt_data->DeoptCount(); i++) { |
| 73 if (deopt_data->Pc(i)->value() == -1) continue; | 73 if (deopt_data->Pc(i)->value() == -1) continue; |
| 74 Address call_address = code_start_address + deopt_data->Pc(i)->value(); | 74 Address call_address = code_start_address + deopt_data->Pc(i)->value(); |
| 75 Address deopt_entry = GetDeoptimizationEntry(i, LAZY); | 75 Address deopt_entry = GetDeoptimizationEntry(i, LAZY); |
| 76 // We need calls to have a predictable size in the unoptimized code, but | 76 // We need calls to have a predictable size in the unoptimized code, but |
| 77 // this is optimized code, so we don't have to have a predictable size. | 77 // this is optimized code, so we don't have to have a predictable size. |
| 78 int call_size_in_bytes = | 78 int call_size_in_bytes = |
| 79 MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry, | 79 MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry, |
| 80 RelocInfo::NONE); | 80 RelocInfo::NONE32); |
| 81 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; | 81 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; |
| 82 ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0); | 82 ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0); |
| 83 ASSERT(call_size_in_bytes <= patch_size()); | 83 ASSERT(call_size_in_bytes <= patch_size()); |
| 84 CodePatcher patcher(call_address, call_size_in_words); | 84 CodePatcher patcher(call_address, call_size_in_words); |
| 85 patcher.masm()->Call(deopt_entry, RelocInfo::NONE); | 85 patcher.masm()->Call(deopt_entry, RelocInfo::NONE32); |
| 86 ASSERT(prev_call_address == NULL || | 86 ASSERT(prev_call_address == NULL || |
| 87 call_address >= prev_call_address + patch_size()); | 87 call_address >= prev_call_address + patch_size()); |
| 88 ASSERT(call_address + patch_size() <= code->instruction_end()); | 88 ASSERT(call_address + patch_size() <= code->instruction_end()); |
| 89 #ifdef DEBUG | 89 #ifdef DEBUG |
| 90 prev_call_address = call_address; | 90 prev_call_address = call_address; |
| 91 #endif | 91 #endif |
| 92 } | 92 } |
| 93 | 93 |
| 94 Isolate* isolate = code->GetIsolate(); | 94 Isolate* isolate = code->GetIsolate(); |
| 95 | 95 |
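Note on the patching loop above: each lazy-deopt call written into the optimized code must fit inside patch_size(), and consecutive patch sites must not overlap or run past instruction_end(). The standalone C++ sketch below models just those invariants with made-up sizes and addresses; it does not use V8's real MacroAssembler, CodePatcher, or RelocInfo.

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const int kInstrSize = 4;               // ARM instruction size
  const int kPatchSize = 3 * kInstrSize;  // hypothetical patch_size()
  // Hypothetical pc offsets of the lazy-deopt points inside the code object.
  std::vector<int> pc_offsets = {0x20, 0x40, 0x64};
  uintptr_t code_start = 0x10000;         // stand-in for instruction_start()
  uintptr_t code_end = 0x10100;           // stand-in for instruction_end()
  uintptr_t prev_call_address = 0;
  for (int offset : pc_offsets) {
    uintptr_t call_address = code_start + offset;
    // Stand-in for MacroAssembler::CallSizeNotPredictableCodeSize(...).
    int call_size_in_bytes = 2 * kInstrSize;
    assert(call_size_in_bytes % kInstrSize == 0);
    assert(call_size_in_bytes <= kPatchSize);
    // The real code emits Call(deopt_entry, RelocInfo::NONE32) here.
    assert(prev_call_address == 0 ||
           call_address >= prev_call_address + kPatchSize);
    assert(call_address + kPatchSize <= code_end);
    prev_call_address = call_address;
  }
  return 0;
}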
| (...skipping 858 matching lines...) |
| 954 const int kSavedRegistersAreaSize = | 954 const int kSavedRegistersAreaSize = |
| 955 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; | 955 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; |
| 956 | 956 |
| 957 // Get the bailout id from the stack. | 957 // Get the bailout id from the stack. |
| 958 __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize)); | 958 __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize)); |
| 959 | 959 |
| 960 // Get the address of the location in the code object if possible (r3) (return | 960 // Get the address of the location in the code object if possible (r3) (return |
| 961 // address for lazy deoptimization) and compute the fp-to-sp delta in | 961 // address for lazy deoptimization) and compute the fp-to-sp delta in |
| 962 // register r4. | 962 // register r4. |
| 963 if (type() == EAGER) { | 963 if (type() == EAGER) { |
| 964 __ mov(r3, Operand(0)); | 964 __ mov(r3, Operand::Zero()); |
| 965 // Correct one word for bailout id. | 965 // Correct one word for bailout id. |
| 966 __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | 966 __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); |
| 967 } else if (type() == OSR) { | 967 } else if (type() == OSR) { |
| 968 __ mov(r3, lr); | 968 __ mov(r3, lr); |
| 969 // Correct one word for bailout id. | 969 // Correct one word for bailout id. |
| 970 __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | 970 __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); |
| 971 } else { | 971 } else { |
| 972 __ mov(r3, lr); | 972 __ mov(r3, lr); |
| 973 // Correct two words for bailout id and return address. | 973 // Correct two words for bailout id and return address. |
| 974 __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize))); | 974 __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize))); |
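The three branches above differ only in how many extra words sit between sp and the saved-registers area: EAGER and OSR have just the bailout id on top, while LAZY also has a return address. A minimal sketch of that arithmetic, with illustrative register counts rather than V8's real constants:

#include <cstdio>

enum DeoptType { EAGER, LAZY, OSR };

// EAGER and OSR push only the bailout id; LAZY also leaves a return address.
int ExtraWords(DeoptType type) { return type == LAZY ? 2 : 1; }

int main() {
  const int kPointerSize = 4;           // 32-bit ARM
  const int kNumberOfRegisters = 16;    // r0..r15
  const int kDoubleRegsSize = 16 * 8;   // illustrative: 16 double registers
  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
  for (DeoptType t : {EAGER, OSR, LAZY}) {
    // Offset added to sp before computing the fp-to-sp delta (r4 above).
    int correction = kSavedRegistersAreaSize + ExtraWords(t) * kPointerSize;
    std::printf("type %d: sp correction = %d bytes\n", t, correction);
  }
  return 0;
}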
| (...skipping 84 matching lines...) |
| 1059 __ add(r1, r0, Operand(r1, LSL, 2)); | 1059 __ add(r1, r0, Operand(r1, LSL, 2)); |
| 1060 __ bind(&outer_push_loop); | 1060 __ bind(&outer_push_loop); |
| 1061 // Inner loop state: r2 = current FrameDescription*, r3 = loop index. | 1061 // Inner loop state: r2 = current FrameDescription*, r3 = loop index. |
| 1062 __ ldr(r2, MemOperand(r0, 0)); // output_[ix] | 1062 __ ldr(r2, MemOperand(r0, 0)); // output_[ix] |
| 1063 __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset())); | 1063 __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset())); |
| 1064 __ bind(&inner_push_loop); | 1064 __ bind(&inner_push_loop); |
| 1065 __ sub(r3, r3, Operand(sizeof(uint32_t))); | 1065 __ sub(r3, r3, Operand(sizeof(uint32_t))); |
| 1066 __ add(r6, r2, Operand(r3)); | 1066 __ add(r6, r2, Operand(r3)); |
| 1067 __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset())); | 1067 __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset())); |
| 1068 __ push(r7); | 1068 __ push(r7); |
| 1069 __ cmp(r3, Operand(0)); | 1069 __ cmp(r3, Operand::Zero()); |
| 1070 __ b(ne, &inner_push_loop); // test for gt? | 1070 __ b(ne, &inner_push_loop); // test for gt? |
| 1071 __ add(r0, r0, Operand(kPointerSize)); | 1071 __ add(r0, r0, Operand(kPointerSize)); |
| 1072 __ cmp(r0, r1); | 1072 __ cmp(r0, r1); |
| 1073 __ b(lt, &outer_push_loop); | 1073 __ b(lt, &outer_push_loop); |
| 1074 | 1074 |
| 1075 // Push state, pc, and continuation from the last output frame. | 1075 // Push state, pc, and continuation from the last output frame. |
| 1076 if (type() != OSR) { | 1076 if (type() != OSR) { |
| 1077 __ ldr(r6, MemOperand(r2, FrameDescription::state_offset())); | 1077 __ ldr(r6, MemOperand(r2, FrameDescription::state_offset())); |
| 1078 __ push(r6); | 1078 __ push(r6); |
| 1079 } | 1079 } |
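The outer/inner loops above copy every output FrameDescription onto the machine stack, word by word from the highest frame offset down to zero. The sketch below restates that control flow in plain C++ over a vector-backed stand-in for the stack; FrameDescription here is a simplified stand-in, not V8's class.

#include <cstddef>
#include <cstdint>
#include <vector>

struct FrameDescription {
  std::vector<uint32_t> contents;  // models the frame_content_offset() region
  std::size_t frame_size() const { return contents.size() * sizeof(uint32_t); }
};

int main() {
  FrameDescription f1{{1, 2, 3}};
  FrameDescription f2{{4, 5}};
  std::vector<FrameDescription> output = {f1, f2};
  std::vector<uint32_t> stack;  // stand-in for the machine stack
  // Outer loop: one iteration per output frame (r0 advancing towards r1).
  for (const FrameDescription& frame : output) {
    // Inner loop: r3 starts at frame_size and is decremented by 4 each pass,
    // so the word at the highest offset is pushed first.
    for (std::size_t offset = frame.frame_size(); offset != 0;) {
      offset -= sizeof(uint32_t);
      stack.push_back(frame.contents[offset / sizeof(uint32_t)]);
    }
  }
  return 0;
}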
| (...skipping 42 matching lines...) |
| 1122 __ push(ip); | 1122 __ push(ip); |
| 1123 __ b(&done); | 1123 __ b(&done); |
| 1124 ASSERT(masm()->pc_offset() - start == table_entry_size_); | 1124 ASSERT(masm()->pc_offset() - start == table_entry_size_); |
| 1125 } | 1125 } |
| 1126 __ bind(&done); | 1126 __ bind(&done); |
| 1127 } | 1127 } |
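Each jump-table entry emitted above must be exactly table_entry_size_ bytes, as the ASSERT checks, because the deoptimizer recovers an entry's id from its address by dividing its offset from the table start by the entry size. A small sketch of that reverse mapping, using made-up sizes and addresses rather than V8's real table:

#include <cassert>
#include <cstdint>

int main() {
  const int kTableEntrySize = 8;     // stand-in for table_entry_size_
  uintptr_t table_start = 0x20000;   // pretend start of the entry table
  for (int id = 0; id < 4; ++id) {
    uintptr_t entry_address = table_start + id * kTableEntrySize;
    // Reverse lookup performed when a deopt entry is hit.
    int recovered_id =
        static_cast<int>((entry_address - table_start) / kTableEntrySize);
    assert(recovered_id == id);
  }
  return 0;
}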
| 1128 | 1128 |
| 1129 #undef __ | 1129 #undef __ |
| 1130 | 1130 |
| 1131 } } // namespace v8::internal | 1131 } } // namespace v8::internal |