Index: src/arm/deoptimizer-arm.cc
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 5339be1d843c727d4f54e331da949338bee2ff79..7ce5224b32072bafd9079b7c37bb48dbeae4fee1 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -77,12 +77,12 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
     // this is optimized code, so we don't have to have a predictable size.
     int call_size_in_bytes =
         MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
-                                                       RelocInfo::NONE);
+                                                       RelocInfo::NONE32);
     int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
     ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
     ASSERT(call_size_in_bytes <= patch_size());
     CodePatcher patcher(call_address, call_size_in_words);
-    patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+    patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
     ASSERT(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
     ASSERT(call_address + patch_size() <= code->instruction_end());
@@ -961,7 +961,7 @@ void Deoptimizer::EntryGenerator::Generate() {
   // address for lazy deoptimization) and compute the fp-to-sp delta in
   // register r4.
   if (type() == EAGER) {
-    __ mov(r3, Operand(0));
+    __ mov(r3, Operand::Zero());
     // Correct one word for bailout id.
     __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
   } else if (type() == OSR) {
@@ -1066,7 +1066,7 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ add(r6, r2, Operand(r3));
   __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
   __ push(r7);
-  __ cmp(r3, Operand(0));
+  __ cmp(r3, Operand::Zero());
   __ b(ne, &inner_push_loop);  // test for gt?
   __ add(r0, r0, Operand(kPointerSize));
   __ cmp(r0, r1);
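
Note: the hunks above look like mechanical renames rather than behavioural changes. RelocInfo::NONE becomes RelocInfo::NONE32, reflecting the split of the NONE relocation mode into 32- and 64-bit variants, and the immediate-zero operand is spelled with the explicit Operand::Zero() factory instead of the Operand(0) constructor. The sketch below is a minimal, self-contained illustration of the named-factory idiom the second change relies on; the Operand class and EmitMovImmediate helper here are hypothetical stand-ins, not V8's actual ARM assembler types.

// Minimal sketch (not V8 code): why a named Zero() factory can be preferable
// to passing a literal 0 to an overloaded constructor. Names are hypothetical.
#include <cstdint>
#include <iostream>

class Operand {
 public:
  explicit Operand(int32_t immediate) : immediate_(immediate) {}

  // Named factory: a self-documenting spelling of "immediate 0" that keeps
  // working even if pointer-taking constructors are added later (a bare
  // literal 0 is also a null pointer constant).
  static Operand Zero() { return Operand(static_cast<int32_t>(0)); }

  int32_t immediate() const { return immediate_; }

 private:
  int32_t immediate_;
};

// Stand-in for the macro-assembler "mov" call sites touched by the patch.
static void EmitMovImmediate(int reg, const Operand& op) {
  std::cout << "mov r" << reg << ", #" << op.immediate() << "\n";
}

int main() {
  EmitMovImmediate(3, Operand::Zero());  // mirrors: __ mov(r3, Operand::Zero());
  return 0;
}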