Chromium Code Reviews

Side by Side Diff: src/x64/lithium-codegen-x64.cc

Issue 11695006: Cleanup RelocInfo::NONE usage. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 12 months ago
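The change itself is mechanical: every x64 site that materializes a full 64-bit immediate with movq switches its relocation-mode argument from RelocInfo::NONE to RelocInfo::NONE64, for example (taken from the prologue chunk below):

    // before
    __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
    // after
    __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);

Both modes mean "record no relocation entry"; the 64 suffix only documents the width of the embedded immediate, so the emitted machine code should be unchanged (an assumption drawn from the shape of the diff, not verified against assembler-x64.h).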
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 146 matching lines...)
157 } else { 157 } else {
158 __ push(rdi); // Callee's JS function. 158 __ push(rdi); // Callee's JS function.
159 } 159 }
160 } 160 }
161 161
162 // Reserve space for the stack slots needed by the code. 162 // Reserve space for the stack slots needed by the code.
163 int slots = GetStackSlotCount(); 163 int slots = GetStackSlotCount();
164 if (slots > 0) { 164 if (slots > 0) {
165 if (FLAG_debug_code) { 165 if (FLAG_debug_code) {
166 __ Set(rax, slots); 166 __ Set(rax, slots);
167 __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE); 167 __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
168 Label loop; 168 Label loop;
169 __ bind(&loop); 169 __ bind(&loop);
170 __ push(kScratchRegister); 170 __ push(kScratchRegister);
171 __ decl(rax); 171 __ decl(rax);
172 __ j(not_zero, &loop); 172 __ j(not_zero, &loop);
173 } else { 173 } else {
174 __ subq(rsp, Immediate(slots * kPointerSize)); 174 __ subq(rsp, Immediate(slots * kPointerSize));
175 #ifdef _MSC_VER 175 #ifdef _MSC_VER
176 // On windows, you may not access the stack more than one page below 176 // On windows, you may not access the stack more than one page below
177 // the most recently mapped page. To make the allocated area randomly 177 // the most recently mapped page. To make the allocated area randomly
(...skipping 948 matching lines...)
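For context on the prologue chunk above: with --debug-code the reserved stack slots are filled one push at a time with kSlotsZapValue instead of a single rsp adjustment, so stale slots stand out in a debugger. A minimal C++ sketch of the same loop shape (the zap constant below is a hypothetical stand-in; the real kSlotsZapValue lives in the V8 headers and is not shown in this diff):

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for V8's kSlotsZapValue.
    constexpr uint64_t kZapStandIn = 0xBADBADBADBADBADBULL;

    // Mirrors: Set(rax, slots); loop { push(kScratchRegister); decl(rax); j(not_zero) }
    void ZapSlots(std::vector<uint64_t>& stack, int slots) {
      for (int n = slots; n > 0; --n) {
        stack.push_back(kZapStandIn);  // one 8-byte slot per iteration
      }
    }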
1126 // The multiplier is a uint32. 1126 // The multiplier is a uint32.
1127 ASSERT(multiplier > 0 && 1127 ASSERT(multiplier > 0 &&
1128 multiplier < (static_cast<int64_t>(1) << 32)); 1128 multiplier < (static_cast<int64_t>(1) << 32));
1129 // The multiply is int64, so sign-extend to r64. 1129 // The multiply is int64, so sign-extend to r64.
1130 __ movsxlq(reg1, dividend); 1130 __ movsxlq(reg1, dividend);
1131 if (divisor < 0 && 1131 if (divisor < 0 &&
1132 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1132 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1133 __ neg(reg1); 1133 __ neg(reg1);
1134 DeoptimizeIf(zero, instr->environment()); 1134 DeoptimizeIf(zero, instr->environment());
1135 } 1135 }
1136 __ movq(reg2, multiplier, RelocInfo::NONE); 1136 __ movq(reg2, multiplier, RelocInfo::NONE64);
1137 // Result just fits in r64, because it's int32 * uint32. 1137 // Result just fits in r64, because it's int32 * uint32.
1138 __ imul(reg2, reg1); 1138 __ imul(reg2, reg1);
1139 1139
1140 __ addq(reg2, Immediate(1 << 30)); 1140 __ addq(reg2, Immediate(1 << 30));
1141 __ sar(reg2, Immediate(shift)); 1141 __ sar(reg2, Immediate(shift));
1142 } 1142 }
1143 } 1143 }
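A note on the division-by-constant chunk just above: it is the standard strength reduction of division by a constant. The 32-bit dividend is sign-extended, multiplied by a precomputed uint32 "magic" multiplier, and the 64-bit product is shifted right, so no idiv is issued; the lithium code adds the signed-dividend handling and the -0 bailout on top of that. The snippet below illustrates the underlying identity in plain C++ for the unsigned case; the multiplier/shift pair is the textbook one for dividing by 10 and is not taken from V8's own selection code, and the +(1 << 30) rounding term above depends on how its particular multiplier is scaled:

    #include <cassert>
    #include <cstdint>

    // n / d as a multiply and a shift: pick s and m = ceil(2^(32+s) / d); then
    // n / d == (uint64_t(n) * m) >> (32 + s) for all uint32 n when the usual
    // magic-number conditions hold. Simplified sketch, not V8's recipe.
    uint32_t DivByConst(uint32_t n, uint32_t m, int s) {
      return static_cast<uint32_t>((static_cast<uint64_t>(n) * m) >> (32 + s));
    }

    int main() {
      const uint32_t m = 3435973837u;  // ceil(2^35 / 10)
      const int s = 3;
      for (uint32_t n = 0; n < 1000000; ++n) {
        assert(DivByConst(n, m, s) == n / 10);
      }
      return 0;
    }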
1144 1144
1145 1145
1146 void LCodeGen::DoDivI(LDivI* instr) { 1146 void LCodeGen::DoDivI(LDivI* instr) {
(...skipping 425 matching lines...)
1572 JSDate::kCacheStampOffset)); 1572 JSDate::kCacheStampOffset));
1573 __ j(not_equal, &runtime, Label::kNear); 1573 __ j(not_equal, &runtime, Label::kNear);
1574 __ movq(result, FieldOperand(object, JSDate::kValueOffset + 1574 __ movq(result, FieldOperand(object, JSDate::kValueOffset +
1575 kPointerSize * index->value())); 1575 kPointerSize * index->value()));
1576 __ jmp(&done); 1576 __ jmp(&done);
1577 } 1577 }
1578 __ bind(&runtime); 1578 __ bind(&runtime);
1579 __ PrepareCallCFunction(2); 1579 __ PrepareCallCFunction(2);
1580 #ifdef _WIN64 1580 #ifdef _WIN64
1581 __ movq(rcx, object); 1581 __ movq(rcx, object);
1582 __ movq(rdx, index, RelocInfo::NONE); 1582 __ movq(rdx, index, RelocInfo::NONE64);
1583 #else 1583 #else
1584 __ movq(rdi, object); 1584 __ movq(rdi, object);
1585 __ movq(rsi, index, RelocInfo::NONE); 1585 __ movq(rsi, index, RelocInfo::NONE64);
1586 #endif 1586 #endif
1587 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); 1587 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1588 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); 1588 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
1589 __ bind(&done); 1589 __ bind(&done);
1590 } 1590 }
1591 } 1591 }
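On the #ifdef _WIN64 split above: the two x64 C ABIs pass the first two integer arguments in different registers, rcx and rdx on Windows x64 versus rdi and rsi in the System V ABI used on Linux and macOS, so the (object, index) pair has to be staged differently before CallCFunction. A small stand-alone reminder (GetDateFieldStandIn below is a hypothetical placeholder for ExternalReference::get_date_field_function, whose real signature is defined elsewhere in V8):

    #include <cstdint>
    #include <cstdio>

    // Placeholder with the same two-argument shape as the runtime call above.
    extern "C" int64_t GetDateFieldStandIn(void* object, int64_t index) {
      (void)object;
      return index;  // the real function reads the requested JSDate field
    }

    int main() {
      // When the compiler emits this call on x64 it places `obj` and the index
      // into the ABI's argument registers for us (rcx/rdx on Windows, rdi/rsi
      // elsewhere); the lithium code above performs those moves by hand
      // before CallCFunction.
      void* obj = nullptr;
      std::printf("%lld\n",
                  static_cast<long long>(GetDateFieldStandIn(obj, 3)));
      return 0;
    }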
1592 1592
1593 1593
1594 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { 1594 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1595 SeqStringSetCharGenerator::Generate(masm(), 1595 SeqStringSetCharGenerator::Generate(masm(),
(...skipping 1835 matching lines...)
3431 } 3431 }
3432 3432
3433 3433
3434 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { 3434 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3435 const XMMRegister xmm_scratch = xmm0; 3435 const XMMRegister xmm_scratch = xmm0;
3436 Register output_reg = ToRegister(instr->result()); 3436 Register output_reg = ToRegister(instr->result());
3437 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3437 XMMRegister input_reg = ToDoubleRegister(instr->value());
3438 3438
3439 Label done; 3439 Label done;
3440 // xmm_scratch = 0.5 3440 // xmm_scratch = 0.5
3441 __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE); 3441 __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE64);
3442 __ movq(xmm_scratch, kScratchRegister); 3442 __ movq(xmm_scratch, kScratchRegister);
3443 Label below_half; 3443 Label below_half;
3444 __ ucomisd(xmm_scratch, input_reg); 3444 __ ucomisd(xmm_scratch, input_reg);
3445 // If input_reg is NaN, this doesn't jump. 3445 // If input_reg is NaN, this doesn't jump.
3446 __ j(above, &below_half, Label::kNear); 3446 __ j(above, &below_half, Label::kNear);
3447 // input = input + 0.5 3447 // input = input + 0.5
3448 // This addition might give a result that isn't correct for 3448 // This addition might give a result that isn't correct for
3449 // rounding, due to loss of precision, but only for a number that's 3449 // rounding, due to loss of precision, but only for a number that's
3450 // so big that the conversion below will overflow anyway. 3450 // so big that the conversion below will overflow anyway.
3451 __ addsd(xmm_scratch, input_reg); 3451 __ addsd(xmm_scratch, input_reg);
3452 // Compute Math.floor(input). 3452 // Compute Math.floor(input).
3453 // Use truncating instruction (OK because input is positive). 3453 // Use truncating instruction (OK because input is positive).
3454 __ cvttsd2si(output_reg, xmm_scratch); 3454 __ cvttsd2si(output_reg, xmm_scratch);
3455 // Overflow is signalled with minint. 3455 // Overflow is signalled with minint.
3456 __ cmpl(output_reg, Immediate(0x80000000)); 3456 __ cmpl(output_reg, Immediate(0x80000000));
3457 DeoptimizeIf(equal, instr->environment()); 3457 DeoptimizeIf(equal, instr->environment());
3458 __ jmp(&done); 3458 __ jmp(&done);
3459 3459
3460 __ bind(&below_half); 3460 __ bind(&below_half);
3461 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3461 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3462 // Bailout if negative (including -0). 3462 // Bailout if negative (including -0).
3463 __ movq(output_reg, input_reg); 3463 __ movq(output_reg, input_reg);
3464 __ testq(output_reg, output_reg); 3464 __ testq(output_reg, output_reg);
3465 DeoptimizeIf(negative, instr->environment()); 3465 DeoptimizeIf(negative, instr->environment());
3466 } else { 3466 } else {
3467 // Bailout if below -0.5, otherwise round to (positive) zero, even 3467 // Bailout if below -0.5, otherwise round to (positive) zero, even
3468 // if negative. 3468 // if negative.
3469 // xmm_scratch = -0.5 3469 // xmm_scratch = -0.5
3470 __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE); 3470 __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE64);
ulan 2013/01/03 10:37:10 Long line.
3471 __ movq(xmm_scratch, kScratchRegister); 3471 __ movq(xmm_scratch, kScratchRegister);
3472 __ ucomisd(input_reg, xmm_scratch); 3472 __ ucomisd(input_reg, xmm_scratch);
3473 DeoptimizeIf(below, instr->environment()); 3473 DeoptimizeIf(below, instr->environment());
3474 } 3474 }
3475 __ xorl(output_reg, output_reg); 3475 __ xorl(output_reg, output_reg);
3476 3476
3477 __ bind(&done); 3477 __ bind(&done);
3478 } 3478 }
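The fast path in DoMathRound above rounds by adding 0.5 and truncating toward zero with cvttsd2si, and relies on the instruction signalling an out-of-range result with the single value 0x80000000; that is what the cmpl/DeoptimizeIf pair checks. A rough C++ rendering of that fast path (a sketch of the idea only; the real code also has the below_half and -0 paths shown above):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // x >= -0.5 path: add 0.5, truncate toward zero, and treat INT32_MIN as the
    // "conversion overflowed" sentinel that cvttsd2si would produce.
    int32_t RoundFastPath(double x, bool* overflow) {
      double truncated = std::trunc(x + 0.5);  // addsd + cvttsd2si
      if (!(truncated >= INT32_MIN && truncated <= INT32_MAX)) {
        *overflow = true;                      // hardware result would be 0x80000000
        return INT32_MIN;
      }
      *overflow = false;
      return static_cast<int32_t>(truncated);
    }

    int main() {
      bool ovf = false;
      assert(RoundFastPath(2.5, &ovf) == 3 && !ovf);
      assert(RoundFastPath(0.4, &ovf) == 0 && !ovf);
      RoundFastPath(1e12, &ovf);
      assert(ovf);  // would hit DeoptimizeIf(equal, ...) in the generated code
      return 0;
    }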
3479 3479
3480 3480
3481 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { 3481 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3482 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3482 XMMRegister input_reg = ToDoubleRegister(instr->value());
3483 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 3483 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3484 __ sqrtsd(input_reg, input_reg); 3484 __ sqrtsd(input_reg, input_reg);
3485 } 3485 }
3486 3486
3487 3487
3488 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { 3488 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3489 XMMRegister xmm_scratch = xmm0; 3489 XMMRegister xmm_scratch = xmm0;
3490 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3490 XMMRegister input_reg = ToDoubleRegister(instr->value());
3491 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 3491 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3492 3492
3493 // Note that according to ECMA-262 15.8.2.13: 3493 // Note that according to ECMA-262 15.8.2.13:
3494 // Math.pow(-Infinity, 0.5) == Infinity 3494 // Math.pow(-Infinity, 0.5) == Infinity
3495 // Math.sqrt(-Infinity) == NaN 3495 // Math.sqrt(-Infinity) == NaN
3496 Label done, sqrt; 3496 Label done, sqrt;
3497 // Check base for -Infinity. According to IEEE-754, double-precision 3497 // Check base for -Infinity. According to IEEE-754, double-precision
3498 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared. 3498 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3499 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE); 3499 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
3500 __ movq(xmm_scratch, kScratchRegister); 3500 __ movq(xmm_scratch, kScratchRegister);
3501 __ ucomisd(xmm_scratch, input_reg); 3501 __ ucomisd(xmm_scratch, input_reg);
3502 // Comparing -Infinity with NaN results in "unordered", which sets the 3502 // Comparing -Infinity with NaN results in "unordered", which sets the
3503 // zero flag as if both were equal. However, it also sets the carry flag. 3503 // zero flag as if both were equal. However, it also sets the carry flag.
3504 __ j(not_equal, &sqrt, Label::kNear); 3504 __ j(not_equal, &sqrt, Label::kNear);
3505 __ j(carry, &sqrt, Label::kNear); 3505 __ j(carry, &sqrt, Label::kNear);
3506 // If input is -Infinity, return Infinity. 3506 // If input is -Infinity, return Infinity.
3507 __ xorps(input_reg, input_reg); 3507 __ xorps(input_reg, input_reg);
3508 __ subsd(input_reg, xmm_scratch); 3508 __ subsd(input_reg, xmm_scratch);
3509 __ jmp(&done, Label::kNear); 3509 __ jmp(&done, Label::kNear);
(...skipping 1075 matching lines...)
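On the DoMathPowHalf chunk above: ECMA-262 section 15.8.2.13 defines Math.pow(-Infinity, 0.5) as +Infinity while sqrt(-Infinity) is NaN, so the code must intercept -Infinity before falling through to sqrtsd, and it recognizes it by its IEEE-754 bit pattern 0xFFF0000000000000 (sign bit plus an all-ones 11-bit exponent and a zero mantissa). A small check of both facts:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    int main() {
      // 0xFFF0000000000000 is exactly the bit pattern of -Infinity.
      uint64_t bits = 0xFFF0000000000000ull;
      double neg_inf;
      std::memcpy(&neg_inf, &bits, sizeof neg_inf);
      assert(std::isinf(neg_inf) && neg_inf < 0);

      // sqrt(-Infinity) is NaN, which is why a plain sqrtsd would give the
      // wrong answer for Math.pow(-Infinity, 0.5); the special case above
      // instead computes 0 - (-Infinity) = +Infinity (xorps + subsd).
      assert(std::isnan(std::sqrt(neg_inf)));
      assert(std::isinf(0.0 - neg_inf) && (0.0 - neg_inf) > 0);
      return 0;
    }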
4585 LOperand* result = instr->result(); 4585 LOperand* result = instr->result();
4586 ASSERT(result->IsRegister()); 4586 ASSERT(result->IsRegister());
4587 4587
4588 XMMRegister input_reg = ToDoubleRegister(input); 4588 XMMRegister input_reg = ToDoubleRegister(input);
4589 Register result_reg = ToRegister(result); 4589 Register result_reg = ToRegister(result);
4590 4590
4591 if (instr->truncating()) { 4591 if (instr->truncating()) {
4592 // Performs a truncating conversion of a floating point number as used by 4592 // Performs a truncating conversion of a floating point number as used by
4593 // the JS bitwise operations. 4593 // the JS bitwise operations.
4594 __ cvttsd2siq(result_reg, input_reg); 4594 __ cvttsd2siq(result_reg, input_reg);
4595 __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE); 4595 __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE64);
ulan 2013/01/03 10:37:10 Long line.
4596 __ cmpq(result_reg, kScratchRegister); 4596 __ cmpq(result_reg, kScratchRegister);
4597 DeoptimizeIf(equal, instr->environment()); 4597 DeoptimizeIf(equal, instr->environment());
4598 } else { 4598 } else {
4599 __ cvttsd2si(result_reg, input_reg); 4599 __ cvttsd2si(result_reg, input_reg);
4600 __ cvtlsi2sd(xmm0, result_reg); 4600 __ cvtlsi2sd(xmm0, result_reg);
4601 __ ucomisd(xmm0, input_reg); 4601 __ ucomisd(xmm0, input_reg);
4602 DeoptimizeIf(not_equal, instr->environment()); 4602 DeoptimizeIf(not_equal, instr->environment());
4603 DeoptimizeIf(parity_even, instr->environment()); // NaN. 4603 DeoptimizeIf(parity_even, instr->environment()); // NaN.
4604 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4604 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4605 Label done; 4605 Label done;
(...skipping 380 matching lines...)
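On the truncating branch that ends just above: cvttsd2siq signals an out-of-range or NaN input by producing the single value 0x8000000000000000 (INT64_MIN), so the code loads that constant and deoptimizes on an exact match. A hedged C++ sketch of the same check (the conversion is guarded with a range test here rather than executing the raw instruction):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Returns INT64_MIN for inputs the hardware conversion could not represent,
    // mirroring the sentinel that the cmpq/DeoptimizeIf pair above looks for.
    int64_t TruncateToInt64(double x) {
      if (!(x > static_cast<double>(INT64_MIN) &&
            x < static_cast<double>(INT64_MAX))) {
        return INT64_MIN;  // also covers NaN: both comparisons are false
      }
      return static_cast<int64_t>(x);
    }

    int main() {
      assert(TruncateToInt64(123.9) == 123);
      assert(TruncateToInt64(1e300) == INT64_MIN);        // would trigger the deopt
      assert(TruncateToInt64(std::nan("")) == INT64_MIN);
      return 0;
    }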
4986 if (value->IsJSObject()) { 4986 if (value->IsJSObject()) {
4987 Handle<JSObject> value_object = Handle<JSObject>::cast(value); 4987 Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4988 __ lea(rcx, Operand(result, *offset)); 4988 __ lea(rcx, Operand(result, *offset));
4989 __ movq(FieldOperand(result, total_offset), rcx); 4989 __ movq(FieldOperand(result, total_offset), rcx);
4990 __ LoadHeapObject(source, value_object); 4990 __ LoadHeapObject(source, value_object);
4991 EmitDeepCopy(value_object, result, source, offset); 4991 EmitDeepCopy(value_object, result, source, offset);
4992 } else if (value->IsHeapObject()) { 4992 } else if (value->IsHeapObject()) {
4993 __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value)); 4993 __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
4994 __ movq(FieldOperand(result, total_offset), rcx); 4994 __ movq(FieldOperand(result, total_offset), rcx);
4995 } else { 4995 } else {
4996 __ movq(rcx, value, RelocInfo::NONE); 4996 __ movq(rcx, value, RelocInfo::NONE64);
4997 __ movq(FieldOperand(result, total_offset), rcx); 4997 __ movq(FieldOperand(result, total_offset), rcx);
4998 } 4998 }
4999 } 4999 }
5000 5000
5001 if (has_elements) { 5001 if (has_elements) {
5002 // Copy elements backing store header. 5002 // Copy elements backing store header.
5003 __ LoadHeapObject(source, elements); 5003 __ LoadHeapObject(source, elements);
5004 for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) { 5004 for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
5005 __ movq(rcx, FieldOperand(source, i)); 5005 __ movq(rcx, FieldOperand(source, i));
5006 __ movq(FieldOperand(result, elements_offset + i), rcx); 5006 __ movq(FieldOperand(result, elements_offset + i), rcx);
5007 } 5007 }
5008 5008
5009 // Copy elements backing store content. 5009 // Copy elements backing store content.
5010 int elements_length = elements->length(); 5010 int elements_length = elements->length();
5011 if (elements->IsFixedDoubleArray()) { 5011 if (elements->IsFixedDoubleArray()) {
5012 Handle<FixedDoubleArray> double_array = 5012 Handle<FixedDoubleArray> double_array =
5013 Handle<FixedDoubleArray>::cast(elements); 5013 Handle<FixedDoubleArray>::cast(elements);
5014 for (int i = 0; i < elements_length; i++) { 5014 for (int i = 0; i < elements_length; i++) {
5015 int64_t value = double_array->get_representation(i); 5015 int64_t value = double_array->get_representation(i);
5016 int total_offset = 5016 int total_offset =
5017 elements_offset + FixedDoubleArray::OffsetOfElementAt(i); 5017 elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
5018 __ movq(rcx, value, RelocInfo::NONE); 5018 __ movq(rcx, value, RelocInfo::NONE64);
5019 __ movq(FieldOperand(result, total_offset), rcx); 5019 __ movq(FieldOperand(result, total_offset), rcx);
5020 } 5020 }
5021 } else if (elements->IsFixedArray()) { 5021 } else if (elements->IsFixedArray()) {
5022 Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements); 5022 Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
5023 for (int i = 0; i < elements_length; i++) { 5023 for (int i = 0; i < elements_length; i++) {
5024 int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); 5024 int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
5025 Handle<Object> value(fast_elements->get(i)); 5025 Handle<Object> value(fast_elements->get(i));
5026 if (value->IsJSObject()) { 5026 if (value->IsJSObject()) {
5027 Handle<JSObject> value_object = Handle<JSObject>::cast(value); 5027 Handle<JSObject> value_object = Handle<JSObject>::cast(value);
5028 __ lea(rcx, Operand(result, *offset)); 5028 __ lea(rcx, Operand(result, *offset));
5029 __ movq(FieldOperand(result, total_offset), rcx); 5029 __ movq(FieldOperand(result, total_offset), rcx);
5030 __ LoadHeapObject(source, value_object); 5030 __ LoadHeapObject(source, value_object);
5031 EmitDeepCopy(value_object, result, source, offset); 5031 EmitDeepCopy(value_object, result, source, offset);
5032 } else if (value->IsHeapObject()) { 5032 } else if (value->IsHeapObject()) {
5033 __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value)); 5033 __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
5034 __ movq(FieldOperand(result, total_offset), rcx); 5034 __ movq(FieldOperand(result, total_offset), rcx);
5035 } else { 5035 } else {
5036 __ movq(rcx, value, RelocInfo::NONE); 5036 __ movq(rcx, value, RelocInfo::NONE64);
5037 __ movq(FieldOperand(result, total_offset), rcx); 5037 __ movq(FieldOperand(result, total_offset), rcx);
5038 } 5038 }
5039 } 5039 }
5040 } else { 5040 } else {
5041 UNREACHABLE(); 5041 UNREACHABLE();
5042 } 5042 }
5043 } 5043 }
5044 } 5044 }
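One detail worth calling out in the FixedDoubleArray branch above: elements are copied by their raw 64-bit representation (get_representation) and materialized with a 64-bit immediate movq, now tagged RelocInfo::NONE64. Copying bits rather than double values keeps special NaN payloads intact (such as V8's hole marker, an implementation detail not visible in this diff). A tiny illustration of a bit-faithful copy:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      // Copy a double by its bit pattern, the way the loop above moves
      // get_representation(i) through rcx into the new backing store.
      double src = 6.125;
      uint64_t representation;
      std::memcpy(&representation, &src, sizeof representation);

      double dst;
      std::memcpy(&dst, &representation, sizeof dst);
      assert(dst == src);  // value and bit pattern both survive
      return 0;
    }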
5045 5045
5046 5046
(...skipping 502 matching lines...)
5549 FixedArray::kHeaderSize - kPointerSize)); 5549 FixedArray::kHeaderSize - kPointerSize));
5550 __ bind(&done); 5550 __ bind(&done);
5551 } 5551 }
5552 5552
5553 5553
5554 #undef __ 5554 #undef __
5555 5555
5556 } } // namespace v8::internal 5556 } } // namespace v8::internal
5557 5557
5558 #endif // V8_TARGET_ARCH_X64 5558 #endif // V8_TARGET_ARCH_X64