| Index: src/x64/lithium-codegen-x64.cc
|
| diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
|
| index 6c8e377d637616d99283fdd027bebd93dd69e1a3..dc6ddf9f7e41595e10b58c033e04fdc4cd958e93 100644
|
| --- a/src/x64/lithium-codegen-x64.cc
|
| +++ b/src/x64/lithium-codegen-x64.cc
|
| @@ -157,7 +157,7 @@ bool LCodeGen::GeneratePrologue() {
|
| #endif
|
| __ push(rax);
|
| __ Set(rax, slots);
|
| - __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
|
| + __ movq(kScratchRegister, kSlotsZapValue);
|
| Label loop;
|
| __ bind(&loop);
|
| __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
|
| @@ -261,11 +261,12 @@ bool LCodeGen::GenerateJumpTable() {
|
| Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
|
| }
|
| if (jump_table_[i].needs_frame) {
|
| - __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
|
| + __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
|
| if (needs_frame.is_bound()) {
|
| __ jmp(&needs_frame);
|
| } else {
|
| __ bind(&needs_frame);
|
| + __ movq(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
|
| __ push(rbp);
|
| __ movq(rbp, rsp);
|
| __ push(rsi);
|
| @@ -589,10 +590,30 @@ void LCodeGen::CallRuntime(const Runtime::Function* function,
|
| }
|
|
|
|
|
| +void LCodeGen::LoadContextFromDeferred(LOperand* context) {
|
| + if (context->IsRegister()) {
|
| + if (!ToRegister(context).is(rsi)) {
|
| + __ movq(rsi, ToRegister(context));
|
| + }
|
| + } else if (context->IsStackSlot()) {
|
| + __ movq(rsi, ToOperand(context));
|
| + } else if (context->IsConstantOperand()) {
|
| + HConstant* constant =
|
| + chunk_->LookupConstant(LConstantOperand::cast(context));
|
| + __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
|
| + } else {
|
| + UNREACHABLE();
|
| + }
|
| +}
|
| +
|
| +
|
| +
|
| void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
|
| int argc,
|
| - LInstruction* instr) {
|
| - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| + LInstruction* instr,
|
| + LOperand* context) {
|
| + LoadContextFromDeferred(context);
|
| +
|
| __ CallRuntimeSaveDoubles(id);
|
| RecordSafepointWithRegisters(
|
| instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
|
| @@ -649,7 +670,27 @@ void LCodeGen::DeoptimizeIf(Condition cc,
|
| return;
|
| }
|
|
|
| - ASSERT(FLAG_deopt_every_n_times == 0); // Not yet implemented on x64.
|
| + if (DeoptEveryNTimes()) {
|
| + ExternalReference count = ExternalReference::stress_deopt_count(isolate());
|
| + Label no_deopt;
|
| + __ pushfq();
|
| + __ push(rax);
|
| + Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
|
| + __ movl(rax, count_operand);
|
| + __ subl(rax, Immediate(1));
|
| + __ j(not_zero, &no_deopt, Label::kNear);
|
| + if (FLAG_trap_on_deopt) __ int3();
|
| + __ movl(rax, Immediate(FLAG_deopt_every_n_times));
|
| + __ movl(count_operand, rax);
|
| + __ pop(rax);
|
| + __ popfq();
|
| + ASSERT(frame_is_built_);
|
| + __ call(entry, RelocInfo::RUNTIME_ENTRY);
|
| + __ bind(&no_deopt);
|
| + __ movl(count_operand, rax);
|
| + __ pop(rax);
|
| + __ popfq();
|
| + }
|
|
|
| if (info()->ShouldTrapOnDeopt()) {
|
| Label done;
|
| @@ -816,10 +857,6 @@ void LCodeGen::RecordSafepoint(
|
| safepoint.DefinePointerRegister(ToRegister(pointer), zone());
|
| }
|
| }
|
| - if (kind & Safepoint::kWithRegisters) {
|
| - // Register rsi always contains a pointer to the context.
|
| - safepoint.DefinePointerRegister(rsi, zone());
|
| - }
|
| }
|
|
|
|
|
| @@ -895,6 +932,7 @@ void LCodeGen::DoParameter(LParameter* instr) {
|
|
|
|
|
| void LCodeGen::DoCallStub(LCallStub* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->result()).is(rax));
|
| switch (instr->hydrogen()->major_key()) {
|
| case CodeStub::RegExpConstructResult: {
|
| @@ -1123,7 +1161,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
|
| __ neg(reg1);
|
| DeoptimizeIf(zero, instr->environment());
|
| }
|
| - __ movq(reg2, multiplier, RelocInfo::NONE64);
|
| + __ Set(reg2, multiplier);
|
| // Result just fit in r64, because it's int32 * uint32.
|
| __ imul(reg2, reg1);
|
|
|
| @@ -1619,51 +1657,107 @@ void LCodeGen::DoDateField(LDateField* instr) {
|
| __ j(not_equal, &runtime, Label::kNear);
|
| __ movq(result, FieldOperand(object, JSDate::kValueOffset +
|
| kPointerSize * index->value()));
|
| - __ jmp(&done);
|
| + __ jmp(&done, Label::kNear);
|
| }
|
| __ bind(&runtime);
|
| __ PrepareCallCFunction(2);
|
| __ movq(arg_reg_1, object);
|
| __ movq(arg_reg_2, index, RelocInfo::NONE64);
|
| __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
|
| - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| __ bind(&done);
|
| }
|
| }
|
|
|
|
|
| -void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
|
| +Operand LCodeGen::BuildSeqStringOperand(Register string,
|
| + LOperand* index,
|
| + String::Encoding encoding) {
|
| + if (index->IsConstantOperand()) {
|
| + int offset = ToInteger32(LConstantOperand::cast(index));
|
| + if (encoding == String::TWO_BYTE_ENCODING) {
|
| + offset *= kUC16Size;
|
| + }
|
| + STATIC_ASSERT(kCharSize == 1);
|
| + return FieldOperand(string, SeqString::kHeaderSize + offset);
|
| + }
|
| + return FieldOperand(
|
| + string, ToRegister(index),
|
| + encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
|
| + SeqString::kHeaderSize);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
|
| + String::Encoding encoding = instr->hydrogen()->encoding();
|
| + Register result = ToRegister(instr->result());
|
| Register string = ToRegister(instr->string());
|
| - Register index = ToRegister(instr->index());
|
| - Register value = ToRegister(instr->value());
|
| - String::Encoding encoding = instr->encoding();
|
|
|
| if (FLAG_debug_code) {
|
| - __ push(value);
|
| - __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
|
| - __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
|
| + __ push(string);
|
| + __ movq(string, FieldOperand(string, HeapObject::kMapOffset));
|
| + __ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset));
|
|
|
| - __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
|
| + __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
|
| static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
|
| static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
|
| - __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
|
| - ? one_byte_seq_type : two_byte_seq_type));
|
| + __ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING
|
| + ? one_byte_seq_type : two_byte_seq_type));
|
| __ Check(equal, kUnexpectedStringType);
|
| - __ pop(value);
|
| + __ pop(string);
|
| }
|
|
|
| + Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
|
| if (encoding == String::ONE_BYTE_ENCODING) {
|
| - __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
|
| - value);
|
| + __ movzxbl(result, operand);
|
| + } else {
|
| + __ movzxwl(result, operand);
|
| + }
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
|
| + String::Encoding encoding = instr->hydrogen()->encoding();
|
| + Register string = ToRegister(instr->string());
|
| +
|
| + if (FLAG_debug_code) {
|
| + __ push(string);
|
| + __ movq(string, FieldOperand(string, HeapObject::kMapOffset));
|
| + __ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset));
|
| +
|
| + __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
|
| + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
|
| + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
|
| + __ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING
|
| + ? one_byte_seq_type : two_byte_seq_type));
|
| + __ Check(equal, kUnexpectedStringType);
|
| + __ pop(string);
|
| + }
|
| +
|
| + Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
|
| + if (instr->value()->IsConstantOperand()) {
|
| + int value = ToInteger32(LConstantOperand::cast(instr->value()));
|
| + ASSERT_LE(0, value);
|
| + if (encoding == String::ONE_BYTE_ENCODING) {
|
| + ASSERT_LE(value, String::kMaxOneByteCharCode);
|
| + __ movb(operand, Immediate(value));
|
| + } else {
|
| + ASSERT_LE(value, String::kMaxUtf16CodeUnit);
|
| + __ movw(operand, Immediate(value));
|
| + }
|
| } else {
|
| - __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
|
| - value);
|
| + Register value = ToRegister(instr->value());
|
| + if (encoding == String::ONE_BYTE_ENCODING) {
|
| + __ movb(operand, value);
|
| + } else {
|
| + __ movw(operand, value);
|
| + }
|
| }
|
| }
|
|
|
|
|
| void LCodeGen::DoThrow(LThrow* instr) {
|
| __ push(ToRegister(instr->value()));
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| CallRuntime(Runtime::kThrow, 1, instr);
|
|
|
| if (FLAG_debug_code) {
|
| @@ -1771,7 +1865,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
|
| __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
|
| // At this point, both left and right are either 0 or -0.
|
| if (operation == HMathMinMax::kMathMin) {
|
| - __ orpd(left_reg, right_reg);
|
| + __ orps(left_reg, right_reg);
|
| } else {
|
| // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
|
| __ addsd(left_reg, right_reg);
|
| @@ -1782,7 +1876,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
|
| __ ucomisd(left_reg, left_reg); // NaN check.
|
| __ j(parity_even, &return_left, Label::kNear);
|
| __ bind(&return_right);
|
| - __ movsd(left_reg, right_reg);
|
| + __ movaps(left_reg, right_reg);
|
|
|
| __ bind(&return_left);
|
| }
|
| @@ -1818,7 +1912,6 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
|
| ASSERT(right.is(xmm1));
|
| __ CallCFunction(
|
| ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
|
| - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| __ movaps(result, xmm_scratch);
|
| break;
|
| }
|
| @@ -1830,6 +1923,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
|
|
|
|
|
| void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->left()).is(rdx));
|
| ASSERT(ToRegister(instr->right()).is(rax));
|
| ASSERT(ToRegister(instr->result()).is(rax));
|
| @@ -2152,6 +2246,33 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
|
| }
|
|
|
|
|
| +void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
|
| + Representation rep = instr->hydrogen()->value()->representation();
|
| + ASSERT(!rep.IsInteger32());
|
| +
|
| + if (rep.IsDouble()) {
|
| + XMMRegister value = ToDoubleRegister(instr->value());
|
| + XMMRegister xmm_scratch = double_scratch0();
|
| + __ xorps(xmm_scratch, xmm_scratch);
|
| + __ ucomisd(xmm_scratch, value);
|
| + EmitFalseBranch(instr, not_equal);
|
| + __ movmskpd(kScratchRegister, value);
|
| + __ testl(kScratchRegister, Immediate(1));
|
| + EmitBranch(instr, not_zero);
|
| + } else {
|
| + Register value = ToRegister(instr->value());
|
| + Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
|
| + __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
|
| + __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
|
| + Immediate(0x80000000));
|
| + EmitFalseBranch(instr, not_equal);
|
| + __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
|
| + Immediate(0x00000000));
|
| + EmitBranch(instr, equal);
|
| + }
|
| +}
|
| +
|
| +
|
| Condition LCodeGen::EmitIsObject(Register input,
|
| Label* is_not_object,
|
| Label* is_object) {
|
| @@ -2244,6 +2365,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
|
|
|
|
|
| void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| Token::Value op = instr->op();
|
|
|
| Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
|
| @@ -2400,6 +2522,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
|
|
|
|
|
| void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| InstanceofStub stub(InstanceofStub::kNoFlags);
|
| __ push(ToRegister(instr->left()));
|
| __ push(ToRegister(instr->right()));
|
| @@ -2431,7 +2554,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
|
| Label map_check_;
|
| };
|
|
|
| -
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| DeferredInstanceOfKnownGlobal* deferred;
|
| deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
|
|
|
| @@ -2439,7 +2562,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
|
| Register object = ToRegister(instr->value());
|
|
|
| // A Smi is not an instance of anything.
|
| - __ JumpIfSmi(object, &false_result);
|
| + __ JumpIfSmi(object, &false_result, Label::kNear);
|
|
|
| // This is the inlined call site instanceof cache. The two occurences of the
|
| // hole value will be patched to the last map/result pair generated by the
|
| @@ -2461,7 +2584,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
|
| __ bind(&end_of_patched_code);
|
| ASSERT(true);
|
| #endif
|
| - __ jmp(&done);
|
| + __ jmp(&done, Label::kNear);
|
|
|
| // The inlined call site cache did not match. Check for null and string
|
| // before calling the deferred code.
|
| @@ -2516,9 +2639,9 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
|
| __ testq(kScratchRegister, kScratchRegister);
|
| Label load_false;
|
| Label done;
|
| - __ j(not_zero, &load_false);
|
| + __ j(not_zero, &load_false, Label::kNear);
|
| __ LoadRoot(rax, Heap::kTrueValueRootIndex);
|
| - __ jmp(&done);
|
| + __ jmp(&done, Label::kNear);
|
| __ bind(&load_false);
|
| __ LoadRoot(rax, Heap::kFalseValueRootIndex);
|
| __ bind(&done);
|
| @@ -2526,6 +2649,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
|
|
|
|
|
| void LCodeGen::DoCmpT(LCmpT* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| Token::Value op = instr->op();
|
|
|
| Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
|
| @@ -2545,9 +2669,12 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
|
|
|
| void LCodeGen::DoReturn(LReturn* instr) {
|
| if (FLAG_trace && info()->IsOptimizing()) {
|
| - // Preserve the return value on the stack and rely on the runtime
|
| - // call to return the value in the same register.
|
| + // Preserve the return value on the stack and rely on the runtime call
|
| + // to return the value in the same register. We're leaving the code
|
| + // managed by the register allocator and tearing down the frame, it's
|
| + // safe to write to the context register.
|
| __ push(rax);
|
| + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| __ CallRuntime(Runtime::kTraceExit, 1);
|
| }
|
| if (info()->saves_caller_doubles()) {
|
| @@ -2598,6 +2725,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
|
|
|
|
|
| void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->global_object()).is(rax));
|
| ASSERT(ToRegister(instr->result()).is(rax));
|
|
|
| @@ -2636,6 +2764,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
|
|
|
|
|
| void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->global_object()).is(rdx));
|
| ASSERT(ToRegister(instr->value()).is(rax));
|
|
|
| @@ -2735,6 +2864,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
|
|
|
|
|
| void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->object()).is(rax));
|
| ASSERT(ToRegister(instr->result()).is(rax));
|
|
|
| @@ -3014,6 +3144,7 @@ Operand LCodeGen::BuildFastArrayOperand(
|
|
|
|
|
| void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->object()).is(rdx));
|
| ASSERT(ToRegister(instr->key()).is(rax));
|
|
|
| @@ -3083,6 +3214,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
|
| // object as a receiver to normal functions. Values have to be
|
| // passed unchanged to builtins and strict-mode functions.
|
| Label global_object, receiver_ok;
|
| + Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
|
|
|
| // Do not transform the receiver to object for strict mode
|
| // functions.
|
| @@ -3091,13 +3223,13 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
|
| __ testb(FieldOperand(kScratchRegister,
|
| SharedFunctionInfo::kStrictModeByteOffset),
|
| Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
|
| - __ j(not_equal, &receiver_ok, Label::kNear);
|
| + __ j(not_equal, &receiver_ok, dist);
|
|
|
| // Do not transform the receiver to object for builtins.
|
| __ testb(FieldOperand(kScratchRegister,
|
| SharedFunctionInfo::kNativeByteOffset),
|
| Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
|
| - __ j(not_equal, &receiver_ok, Label::kNear);
|
| + __ j(not_equal, &receiver_ok, dist);
|
|
|
| // Normal function. Replace undefined or null with global receiver.
|
| __ CompareRoot(receiver, Heap::kNullValueRootIndex);
|
| @@ -3116,7 +3248,8 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
|
| // TODO(kmillikin): We have a hydrogen value for the global object. See
|
| // if it's better to use it than to explicitly fetch it from the context
|
| // here.
|
| - __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
|
| + __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| + __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
|
| __ movq(receiver,
|
| FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
|
| __ bind(&receiver_ok);
|
| @@ -3163,7 +3296,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
|
| ParameterCount actual(rax);
|
| __ InvokeFunction(function, actual, CALL_FUNCTION,
|
| safepoint_generator, CALL_AS_METHOD);
|
| - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| @@ -3186,7 +3318,12 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
|
|
|
| void LCodeGen::DoContext(LContext* instr) {
|
| Register result = ToRegister(instr->result());
|
| - __ movq(result, rsi);
|
| + if (info()->IsOptimizing()) {
|
| + __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| + } else {
|
| + // If there is no frame, the context must be in rsi.
|
| + ASSERT(result.is(rsi));
|
| + }
|
| }
|
|
|
|
|
| @@ -3199,6 +3336,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
|
|
|
|
|
| void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| __ push(rsi); // The context is the first argument.
|
| __ Push(instr->hydrogen()->pairs());
|
| __ Push(Smi::FromInt(instr->hydrogen()->flags()));
|
| @@ -3207,8 +3345,10 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
|
|
|
|
|
| void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
|
| + Register context = ToRegister(instr->context());
|
| Register result = ToRegister(instr->result());
|
| - __ movq(result, GlobalObjectOperand());
|
| + __ movq(result,
|
| + Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
|
| }
|
|
|
|
|
| @@ -3265,9 +3405,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
|
| __ InvokeFunction(
|
| function, expected, count, CALL_FUNCTION, generator, call_kind);
|
| }
|
| -
|
| - // Restore context.
|
| - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| @@ -3308,17 +3445,18 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
|
|
|
| // Slow case: Call the runtime system to do the number allocation.
|
| __ bind(&slow);
|
| - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
|
| + CallRuntimeFromDeferred(
|
| + Runtime::kAllocateHeapNumber, 0, instr, instr->context());
|
| // Set the pointer to the new heap number in tmp.
|
| if (!tmp.is(rax)) __ movq(tmp, rax);
|
| // Restore input_reg after call to runtime.
|
| __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
|
|
|
| __ bind(&allocated);
|
| - __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
|
| + __ MoveDouble(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
|
| __ shl(tmp2, Immediate(1));
|
| __ shr(tmp2, Immediate(1));
|
| - __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
|
| + __ MoveDouble(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
|
| __ StoreToSafepointRegisterSlot(input_reg, tmp);
|
|
|
| __ bind(&done);
|
| @@ -3369,7 +3507,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
|
| XMMRegister input_reg = ToDoubleRegister(instr->value());
|
| __ xorps(scratch, scratch);
|
| __ subsd(scratch, input_reg);
|
| - __ andpd(input_reg, scratch);
|
| + __ andps(input_reg, scratch);
|
| } else if (r.IsInteger32()) {
|
| EmitIntegerMathAbs(instr);
|
| } else if (r.IsSmi()) {
|
| @@ -3419,7 +3557,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
|
| __ testq(output_reg, Immediate(1));
|
| DeoptimizeIf(not_zero, instr->environment());
|
| __ Set(output_reg, 0);
|
| - __ jmp(&done);
|
| + __ jmp(&done, Label::kNear);
|
| __ bind(&positive_sign);
|
| }
|
|
|
| @@ -3453,10 +3591,11 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
|
| static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
|
|
|
| Label done, round_to_zero, below_one_half, do_not_compensate, restore;
|
| - __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
|
| + Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
|
| + __ movq(kScratchRegister, one_half);
|
| __ movq(xmm_scratch, kScratchRegister);
|
| __ ucomisd(xmm_scratch, input_reg);
|
| - __ j(above, &below_one_half);
|
| + __ j(above, &below_one_half, Label::kNear);
|
|
|
| // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
|
| __ addsd(xmm_scratch, input_reg);
|
| @@ -3465,13 +3604,13 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
|
| __ cmpl(output_reg, Immediate(0x80000000));
|
| __ RecordComment("D2I conversion overflow");
|
| DeoptimizeIf(equal, instr->environment());
|
| - __ jmp(&done);
|
| + __ jmp(&done, dist);
|
|
|
| __ bind(&below_one_half);
|
| - __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
|
| + __ movq(kScratchRegister, minus_one_half);
|
| __ movq(xmm_scratch, kScratchRegister);
|
| __ ucomisd(xmm_scratch, input_reg);
|
| - __ j(below_equal, &round_to_zero);
|
| + __ j(below_equal, &round_to_zero, Label::kNear);
|
|
|
| // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
|
| // compare and compensate.
|
| @@ -3490,7 +3629,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
|
| // No overflow because we already ruled out minint.
|
| __ bind(&restore);
|
| __ movq(input_reg, kScratchRegister); // Restore input_reg.
|
| - __ jmp(&done);
|
| + __ jmp(&done, dist);
|
|
|
| __ bind(&round_to_zero);
|
| // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
|
| @@ -3524,7 +3663,7 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
|
| Label done, sqrt;
|
| // Check base for -Infinity. According to IEEE-754, double-precision
|
| // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
|
| - __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
|
| + __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
|
| __ movq(xmm_scratch, kScratchRegister);
|
| __ ucomisd(xmm_scratch, input_reg);
|
| // Comparing -Infinity with NaN results in "unordered", which sets the
|
| @@ -3563,7 +3702,7 @@ void LCodeGen::DoPower(LPower* instr) {
|
| __ CallStub(&stub);
|
| } else if (exponent_type.IsTagged()) {
|
| Label no_deopt;
|
| - __ JumpIfSmi(exponent, &no_deopt);
|
| + __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
|
| __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
|
| DeoptimizeIf(not_equal, instr->environment());
|
| __ bind(&no_deopt);
|
| @@ -3632,8 +3771,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
|
| // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
|
| XMMRegister result = ToDoubleRegister(instr->result());
|
| XMMRegister scratch4 = double_scratch0();
|
| - __ movq(scratch3, V8_INT64_C(0x4130000000000000),
|
| - RelocInfo::NONE64); // 1.0 x 2^20 as double
|
| + __ movq(scratch3, V8_INT64_C(0x4130000000000000)); // 1.0 x 2^20 as double
|
| __ movq(scratch4, scratch3);
|
| __ movd(result, random);
|
| __ xorps(result, scratch4);
|
| @@ -3687,6 +3825,9 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
|
|
|
| void LCodeGen::DoMathTan(LMathTan* instr) {
|
| ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
|
| + // Set the context register to a GC-safe fake value. Clobbering it is
|
| + // OK because this instruction is marked as a call.
|
| + __ Set(rsi, 0);
|
| TranscendentalCacheStub stub(TranscendentalCache::TAN,
|
| TranscendentalCacheStub::UNTAGGED);
|
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| @@ -3695,6 +3836,9 @@ void LCodeGen::DoMathTan(LMathTan* instr) {
|
|
|
| void LCodeGen::DoMathCos(LMathCos* instr) {
|
| ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
|
| + // Set the context register to a GC-safe fake value. Clobbering it is
|
| + // OK because this instruction is marked as a call.
|
| + __ Set(rsi, 0);
|
| TranscendentalCacheStub stub(TranscendentalCache::COS,
|
| TranscendentalCacheStub::UNTAGGED);
|
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| @@ -3703,6 +3847,9 @@ void LCodeGen::DoMathCos(LMathCos* instr) {
|
|
|
| void LCodeGen::DoMathSin(LMathSin* instr) {
|
| ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
|
| + // Set the context register to a GC-safe fake value. Clobbering it is
|
| + // OK because this instruction is marked as a call.
|
| + __ Set(rsi, 0);
|
| TranscendentalCacheStub stub(TranscendentalCache::SIN,
|
| TranscendentalCacheStub::UNTAGGED);
|
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| @@ -3710,6 +3857,7 @@ void LCodeGen::DoMathSin(LMathSin* instr) {
|
|
|
|
|
| void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->function()).is(rdi));
|
| ASSERT(instr->HasPointerMap());
|
|
|
| @@ -3719,7 +3867,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
|
| SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
|
| ParameterCount count(instr->arity());
|
| __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
|
| - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| } else {
|
| CallKnownFunction(known_function,
|
| instr->hydrogen()->formal_parameter_count(),
|
| @@ -3732,6 +3879,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
|
|
|
|
|
| void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->key()).is(rcx));
|
| ASSERT(ToRegister(instr->result()).is(rax));
|
|
|
| @@ -3739,11 +3887,11 @@ void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
|
| Handle<Code> ic =
|
| isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
|
| CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| void LCodeGen::DoCallNamed(LCallNamed* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->result()).is(rax));
|
|
|
| int arity = instr->arity();
|
| @@ -3752,22 +3900,22 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
|
| isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
|
| __ Move(rcx, instr->name());
|
| CallCode(ic, mode, instr);
|
| - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| void LCodeGen::DoCallFunction(LCallFunction* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->function()).is(rdi));
|
| ASSERT(ToRegister(instr->result()).is(rax));
|
|
|
| int arity = instr->arity();
|
| CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
|
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->result()).is(rax));
|
| int arity = instr->arity();
|
| RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
|
| @@ -3775,7 +3923,6 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
|
| isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
|
| __ Move(rcx, instr->name());
|
| CallCode(ic, mode, instr);
|
| - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| @@ -3791,6 +3938,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
|
|
|
|
|
| void LCodeGen::DoCallNew(LCallNew* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->constructor()).is(rdi));
|
| ASSERT(ToRegister(instr->result()).is(rax));
|
|
|
| @@ -3804,6 +3952,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
|
|
|
|
|
| void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->constructor()).is(rdi));
|
| ASSERT(ToRegister(instr->result()).is(rax));
|
|
|
| @@ -3827,13 +3976,13 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
|
| // look at the first argument
|
| __ movq(rcx, Operand(rsp, 0));
|
| __ testq(rcx, rcx);
|
| - __ j(zero, &packed_case);
|
| + __ j(zero, &packed_case, Label::kNear);
|
|
|
| ElementsKind holey_kind = GetHoleyElementsKind(kind);
|
| ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
|
| override_mode);
|
| CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
|
| - __ jmp(&done);
|
| + __ jmp(&done, Label::kNear);
|
| __ bind(&packed_case);
|
| }
|
|
|
| @@ -3848,6 +3997,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
|
|
|
|
|
| void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
|
| }
|
|
|
| @@ -3954,6 +4104,10 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
| if (operand_value->IsRegister()) {
|
| Register value = ToRegister(operand_value);
|
| __ Store(FieldOperand(write_register, offset), value, representation);
|
| + } else if (representation.IsInteger32()) {
|
| + int32_t value = ToInteger32(operand_value);
|
| + ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
|
| + __ movl(FieldOperand(write_register, offset), Immediate(value));
|
| } else {
|
| Handle<Object> handle_value = ToHandle(operand_value);
|
| ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
|
| @@ -3980,6 +4134,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
|
|
|
|
| void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->object()).is(rdx));
|
| ASSERT(ToRegister(instr->value()).is(rax));
|
|
|
| @@ -4129,7 +4284,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
|
| Label have_value;
|
|
|
| __ ucomisd(value, value);
|
| - __ j(parity_odd, &have_value); // NaN.
|
| + __ j(parity_odd, &have_value, Label::kNear); // NaN.
|
|
|
| __ Set(kScratchRegister, BitCast<uint64_t>(
|
| FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
|
| @@ -4217,6 +4372,7 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
|
|
|
|
|
| void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| ASSERT(ToRegister(instr->object()).is(rdx));
|
| ASSERT(ToRegister(instr->key()).is(rcx));
|
| ASSERT(ToRegister(instr->value()).is(rax));
|
| @@ -4252,6 +4408,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
|
| if (!object_reg.is(rax)) {
|
| __ movq(rax, object_reg);
|
| }
|
| + LoadContextFromDeferred(instr->context());
|
| __ Move(rbx, to_map);
|
| TransitionElementsKindStub stub(from_kind, to_kind);
|
| __ CallStub(&stub);
|
| @@ -4273,10 +4430,19 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
|
|
|
|
|
| void LCodeGen::DoStringAdd(LStringAdd* instr) {
|
| - EmitPushTaggedOperand(instr->left());
|
| - EmitPushTaggedOperand(instr->right());
|
| - StringAddStub stub(instr->hydrogen()->flags());
|
| - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| + if (FLAG_new_string_add) {
|
| + ASSERT(ToRegister(instr->left()).is(rdx));
|
| + ASSERT(ToRegister(instr->right()).is(rax));
|
| + NewStringAddStub stub(instr->hydrogen()->flags(),
|
| + isolate()->heap()->GetPretenureMode());
|
| + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| + } else {
|
| + EmitPushTaggedOperand(instr->left());
|
| + EmitPushTaggedOperand(instr->right());
|
| + StringAddStub stub(instr->hydrogen()->flags());
|
| + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| + }
|
| }
|
|
|
|
|
| @@ -4327,7 +4493,8 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
|
| __ Integer32ToSmi(index, index);
|
| __ push(index);
|
| }
|
| - CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
|
| + CallRuntimeFromDeferred(
|
| + Runtime::kStringCharCodeAt, 2, instr, instr->context());
|
| __ AssertSmi(rax);
|
| __ SmiToInteger32(rax, rax);
|
| __ StoreToSafepointRegisterSlot(result, rax);
|
| @@ -4380,7 +4547,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
|
| PushSafepointRegistersScope scope(this);
|
| __ Integer32ToSmi(char_code, char_code);
|
| __ push(char_code);
|
| - CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
|
| + CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
|
| __ StoreToSafepointRegisterSlot(result, rax);
|
| }
|
|
|
| @@ -4421,6 +4588,22 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
|
| }
|
|
|
|
|
| +void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
|
| + LOperand* input = instr->value();
|
| + ASSERT(input->IsRegister());
|
| + LOperand* output = instr->result();
|
| + if (!instr->hydrogen()->value()->HasRange() ||
|
| + !instr->hydrogen()->value()->range()->IsInSmiRange() ||
|
| + instr->hydrogen()->value()->range()->upper() == kMaxInt) {
|
| + // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
|
| + // interval, so we treat kMaxInt as a sentinel for this entire interval.
|
| + __ testl(ToRegister(input), Immediate(0x80000000));
|
| + DeoptimizeIf(not_zero, instr->environment());
|
| + }
|
| + __ Integer32ToSmi(ToRegister(output), ToRegister(input));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
|
| LOperand* input = instr->value();
|
| ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
| @@ -4484,7 +4667,16 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
|
| // integer value.
|
| __ StoreToSafepointRegisterSlot(reg, Immediate(0));
|
|
|
| - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
|
| + // NumberTagU uses the context from the frame, rather than
|
| + // the environment's HContext or HInlinedContext value.
|
| + // It only calls Runtime::kAllocateHeapNumber.
|
| + // The corresponding HChange instructions are added in a phase that does
|
| + // not have easy access to the local context.
|
| + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
|
| + RecordSafepointWithRegisters(
|
| + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
|
| +
|
| if (!reg.is(rax)) __ movq(reg, rax);
|
|
|
| // Done. Put the value in temp_xmm into the value of the allocated heap
|
| @@ -4532,8 +4724,15 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
|
|
|
| {
|
| PushSafepointRegistersScope scope(this);
|
| - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
|
| - // Ensure that value in rax survives popping registers.
|
| + // NumberTagD uses the context from the frame, rather than
|
| + // the environment's HContext or HInlinedContext value.
|
| + // It only calls Runtime::kAllocateHeapNumber.
|
| + // The corresponding HChange instructions are added in a phase that does
|
| + // not have easy access to the local context.
|
| + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
|
| + RecordSafepointWithRegisters(
|
| + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
|
| __ movq(kScratchRegister, rax);
|
| }
|
| __ movq(reg, kScratchRegister);
|
| @@ -4582,7 +4781,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
|
| __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
|
|
|
| if (can_convert_undefined_to_nan) {
|
| - __ j(not_equal, &convert);
|
| + __ j(not_equal, &convert, Label::kNear);
|
| } else {
|
| DeoptimizeIf(not_equal, env);
|
| }
|
| @@ -4839,7 +5038,11 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
|
| {
|
| PushSafepointRegistersScope scope(this);
|
| __ push(object);
|
| - CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
|
| + __ Set(rsi, 0);
|
| + __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
|
| + RecordSafepointWithRegisters(
|
| + instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
|
| +
|
| __ testq(rax, Immediate(kSmiTagMask));
|
| }
|
| DeoptimizeIf(zero, instr->environment());
|
| @@ -4880,12 +5083,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
|
| Label success;
|
| for (int i = 0; i < map_set.size() - 1; i++) {
|
| Handle<Map> map = map_set.at(i).handle();
|
| - __ CompareMap(reg, map, &success);
|
| - __ j(equal, &success);
|
| + __ CompareMap(reg, map);
|
| + __ j(equal, &success, Label::kNear);
|
| }
|
|
|
| Handle<Map> map = map_set.at(map_set.size() - 1).handle();
|
| - __ CompareMap(reg, map, &success);
|
| + __ CompareMap(reg, map);
|
| if (instr->hydrogen()->has_migration_target()) {
|
| __ j(not_equal, deferred->entry());
|
| } else {
|
| @@ -4917,8 +5120,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
|
| XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
|
| XMMRegister xmm_scratch = double_scratch0();
|
| Label is_smi, done, heap_number;
|
| -
|
| - __ JumpIfSmi(input_reg, &is_smi);
|
| + Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
|
| + __ JumpIfSmi(input_reg, &is_smi, dist);
|
|
|
| // Check for heap number
|
| __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
|
| @@ -5031,12 +5234,15 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
|
| if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
|
| ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
|
| ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
|
| - CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
|
| + CallRuntimeFromDeferred(
|
| + Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
|
| } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
|
| ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
|
| - CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
|
| + CallRuntimeFromDeferred(
|
| + Runtime::kAllocateInOldDataSpace, 1, instr, instr->context());
|
| } else {
|
| - CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
|
| + CallRuntimeFromDeferred(
|
| + Runtime::kAllocateInNewSpace, 1, instr, instr->context());
|
| }
|
| __ StoreToSafepointRegisterSlot(result, rax);
|
| }
|
| @@ -5050,6 +5256,7 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
|
|
|
|
|
| void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| Label materialized;
|
| // Registers will be used as follows:
|
| // rcx = literals array.
|
| @@ -5075,7 +5282,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
|
| int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
|
| Label allocated, runtime_allocate;
|
| __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
|
| - __ jmp(&allocated);
|
| + __ jmp(&allocated, Label::kNear);
|
|
|
| __ bind(&runtime_allocate);
|
| __ push(rbx);
|
| @@ -5100,6 +5307,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
|
|
|
|
|
| void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| // Use the fast case closure allocation code that allocates in new
|
| // space for nested functions that don't need literals cloning.
|
| bool pretenure = instr->hydrogen()->pretenure();
|
| @@ -5119,6 +5327,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
|
|
|
|
|
| void LCodeGen::DoTypeof(LTypeof* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| LOperand* input = instr->value();
|
| EmitPushTaggedOperand(input);
|
| CallRuntime(Runtime::kTypeof, 1, instr);
|
| @@ -5288,6 +5497,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
|
| }
|
|
|
|
|
| +void LCodeGen::DoDummy(LDummy* instr) {
|
| + // Nothing to see here, move on!
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoDummyUse(LDummyUse* instr) {
|
| // Nothing to see here, move on!
|
| }
|
| @@ -5326,6 +5540,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
|
| Label done;
|
| __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
|
| __ j(above_equal, &done, Label::kNear);
|
| +
|
| + ASSERT(instr->context()->IsRegister());
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| CallCode(isolate()->builtins()->StackCheck(),
|
| RelocInfo::CODE_TARGET,
|
| instr);
|
| @@ -5369,6 +5586,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
|
|
|
|
|
| void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(rsi));
|
| __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
|
| DeoptimizeIf(equal, instr->environment());
|
|
|
| @@ -5408,9 +5626,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
|
| Label load_cache, done;
|
| __ EnumLength(result, map);
|
| __ Cmp(result, Smi::FromInt(0));
|
| - __ j(not_equal, &load_cache);
|
| + __ j(not_equal, &load_cache, Label::kNear);
|
| __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
|
| - __ jmp(&done);
|
| + __ jmp(&done, Label::kNear);
|
| __ bind(&load_cache);
|
| __ LoadInstanceDescriptors(map, result);
|
| __ movq(result,
|
| @@ -5438,7 +5656,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
|
| Label out_of_object, done;
|
| __ SmiToInteger32(index, index);
|
| __ cmpl(index, Immediate(0));
|
| - __ j(less, &out_of_object);
|
| + __ j(less, &out_of_object, Label::kNear);
|
| __ movq(object, FieldOperand(object,
|
| index,
|
| times_pointer_size,
|
|
|