Chromium Code Reviews

Unified Diff: src/mips/lithium-codegen-mips.cc

Issue 12212080: MIPS: Generate the TransitionElementsStub using Crankshaft (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Removed unused flag (created 7 years, 10 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 129 matching lines...)

       int receiver_offset = scope()->num_parameters() * kPointerSize;
       __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
       __ sw(a2, MemOperand(sp, receiver_offset));
       __ bind(&ok);
     }
   }

   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
-    // The following three instructions must remain together and unmodified for
-    // code aging to work properly.
-    __ Push(ra, fp, cp, a1);
-    // Add unused load of ip to ensure prologue sequence is identical for
-    // full-codegen and lithium-codegen.
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    // Adj. FP to point to saved FP.
-    __ Addu(fp, sp, Operand(2 * kPointerSize));
+    if (info()->IsStub()) {
+      __ Push(ra, fp, cp);
+      __ Push(Smi::FromInt(StackFrame::STUB));
+      // Adjust FP to point to saved FP.
+      __ Addu(fp, sp, Operand(2 * kPointerSize));
+    } else {
+      // The following three instructions must remain together and unmodified
+      // for code aging to work properly.
+      __ Push(ra, fp, cp, a1);
+      // Add unused load of ip to ensure prologue sequence is identical for
+      // full-codegen and lithium-codegen.
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      // Adj. FP to point to saved FP.
+      __ Addu(fp, sp, Operand(2 * kPointerSize));
+    }
     frame_is_built_ = true;
   }

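For orientation, here is the stub frame the new IsStub() path builds, as we
read the pushes above (assuming 32-bit MIPS, kPointerSize == 4; this diagram
is not part of the patch):

    fp + 4 : saved ra
    fp + 0 : saved fp   <-- fp, after the Addu(fp, sp, 2 * kPointerSize)
    fp - 4 : cp (context)
    fp - 8 : Smi::FromInt(StackFrame::STUB)   <-- sp

Push(ra, fp, cp) leaves ra highest on the stack, and the marker push puts sp
two words below the saved-fp slot, which is exactly what the Addu encodes.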
   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
-      __ li(a0, Operand(slots));
-      __ li(a2, Operand(kSlotsZapValue));
+      __ Subu(sp, sp, Operand(slots * kPointerSize));
+      __ push(a0);
+      __ push(a1);
+      __ Addu(a0, sp, Operand(slots * kPointerSize));
+      __ li(a1, Operand(kSlotsZapValue));
       Label loop;
       __ bind(&loop);
-      __ push(a2);
-      __ Subu(a0, a0, 1);
-      __ Branch(&loop, ne, a0, Operand(zero_reg));
+      __ Subu(a0, a0, Operand(kPointerSize));
+      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
+      __ Branch(&loop, ne, a0, Operand(sp));
+      __ pop(a1);
+      __ pop(a0);
     } else {
       __ Subu(sp, sp, Operand(slots * kPointerSize));
     }
   }

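The reworked FLAG_debug_code loop reserves the whole slot area first, saves
a0/a1 (which, we suspect, may now carry live incoming values since stubs also
run this prologue), and then zaps the reserved slots in place; the
"2 * kPointerSize" offset skips over the two saved registers. Roughly, as a
sketch rather than V8 code:

    // a0 walks from the top of the reserved slot area down to sp;
    // every reserved slot is filled with kSlotsZapValue:
    //   for (p = slot_area_end; p != slot_area_start; )
    //     *--p = kSlotsZapValue;

The old loop achieved the same zapping by pushing kSlotsZapValue "slots"
times, clobbering a0/a2 in the process.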
+  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    Comment(";;; Save clobbered callee double registers");
+    int count = 0;
+    BitVector* doubles = chunk()->allocated_double_registers();
+    BitVector::Iterator save_iterator(doubles);
+    while (!save_iterator.Done()) {
+      __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+              MemOperand(sp, count * kDoubleSize));
+      save_iterator.Advance();
+      count++;
+    }
+  }
+
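The restore side of this lives in DoReturn further down: it walks the same
allocated_double_registers() bit vector in the same order with matching
MemOperand(sp, count * kDoubleSize) offsets, so each double register is
reloaded from the slot it was saved to here.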
   // Possibly allocate a local context.
   int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is in a1.
     __ push(a1);
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
(...skipping 2267 matching lines...)
 }


 void LCodeGen::DoReturn(LReturn* instr) {
   if (FLAG_trace && info()->IsOptimizing()) {
     // Push the return value on the stack as the parameter.
     // Runtime::TraceExit returns its parameter in v0.
     __ push(v0);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
+  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    ASSERT(NeedsEagerFrame());
+    BitVector* doubles = chunk()->allocated_double_registers();
+    BitVector::Iterator save_iterator(doubles);
+    int count = 0;
+    while (!save_iterator.Done()) {
+      __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+              MemOperand(sp, count * kDoubleSize));
+      save_iterator.Advance();
+      count++;
+    }
+  }
   if (NeedsEagerFrame()) {
     int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
     __ mov(sp, fp);
     __ Pop(ra, fp);
-    __ Addu(sp, sp, Operand(sp_delta));
+    if (!info()->IsStub()) {
+      __ Addu(sp, sp, Operand(sp_delta));
+    }
   }
   __ Jump(ra);
 }

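For a concrete reading of sp_delta: a function with two declared parameters
gets (2 + 1) * 4 = 12 bytes popped, the extra slot being the receiver. The
new !info()->IsStub() guard skips this adjustment for stub frames, which
(matching the prologue change above) carry a STUB marker instead of
receiver and argument slots, so there is nothing of the caller's to pop.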

 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
   __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
   if (instr->hydrogen()->RequiresHoleCheck()) {
(...skipping 756 matching lines...)
 }


 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
   __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
 }


 void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
   Register result = ToRegister(instr->result());
-  __ mov(result, cp);
+  for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
+    if (!it.value()->IsReturn()) {
+      __ mov(result, cp);
+      return;
+    }
+  }
 }


 void LCodeGen::DoOuterContext(LOuterContext* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ lw(result,
         MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
 }

(...skipping 926 matching lines...)

   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }


 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   Register object_reg = ToRegister(instr->object());
-  Register new_map_reg = ToRegister(instr->new_map_temp());
   Register scratch = scratch0();

   Handle<Map> from_map = instr->original_map();
   Handle<Map> to_map = instr->transitioned_map();
   ElementsKind from_kind = instr->from_kind();
   ElementsKind to_kind = instr->to_kind();

-  __ mov(ToRegister(instr->result()), object_reg);
-
   Label not_applicable;
   __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   __ Branch(&not_applicable, ne, scratch, Operand(from_map));

-  __ li(new_map_reg, Operand(to_map));
   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ li(new_map_reg, Operand(to_map));
     __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
     // Write barrier.
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         scratch, kRAHasBeenSaved, kDontSaveFPRegs);
+  } else if (FLAG_compiled_transitions) {
+    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    __ mov(a0, object_reg);
+    __ li(a1, Operand(to_map));
+    TransitionElementsKindStub stub(from_kind, to_kind);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   } else if (IsFastSmiElementsKind(from_kind) &&
              IsFastDoubleElementsKind(to_kind)) {
     Register fixed_object_reg = ToRegister(instr->temp());
     ASSERT(fixed_object_reg.is(a2));
+    Register new_map_reg = ToRegister(instr->new_map_temp());
     ASSERT(new_map_reg.is(a3));
+    __ li(new_map_reg, Operand(to_map));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
              RelocInfo::CODE_TARGET, instr);
   } else if (IsFastDoubleElementsKind(from_kind) &&
              IsFastObjectElementsKind(to_kind)) {
     Register fixed_object_reg = ToRegister(instr->temp());
     ASSERT(fixed_object_reg.is(a2));
+    Register new_map_reg = ToRegister(instr->new_map_temp());
     ASSERT(new_map_reg.is(a3));
+    __ li(new_map_reg, Operand(to_map));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
              RelocInfo::CODE_TARGET, instr);
   } else {
     UNREACHABLE();
   }
   __ bind(&not_applicable);
 }

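The dispatch in DoTransitionElementsKind after this patch, summarized (our
paraphrase, not code from the CL):

    // if (IsSimpleMapChangeTransition(from, to))
    //     store the new map + write barrier
    // else if (FLAG_compiled_transitions)
    //     call the new TransitionElementsKindStub under a safepoint scope
    // else if (fast smi -> fast double)
    //     TransitionElementsSmiToDouble builtin
    // else if (fast double -> fast object)
    //     TransitionElementsDoubleToObject builtin
    // else
    //     UNREACHABLE()

Note that new_map_temp() is now only materialized on the paths that need a
map register, and the old unconditional mov into instr->result() is dropped;
the stub path instead passes the object in a0 and the new map in a1.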
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  Label fail;
+  __ TestJSArrayForAllocationSiteInfo(object, temp, ne, &fail);
+  DeoptimizeIf(al, instr->environment());
+  __ bind(&fail);
+}
+
+
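DeoptimizeIf(al, ...) is an unconditional deopt, so the net effect, on our
reading of the TestJSArrayForAllocationSiteInfo helper's contract, is: branch
to fail (continue normally) when no AllocationSiteInfo follows the array, and
deoptimize when one is found.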
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   __ push(ToRegister(instr->left()));
   __ push(ToRegister(instr->right()));
   StringAddStub stub(NO_STRING_CHECK_IN_STUB);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }


 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   class DeferredStringCharCodeAt: public LDeferredCode {
(...skipping 316 matching lines...)
    private:
     LNumberTagD* instr_;
   };

   DoubleRegister input_reg = ToDoubleRegister(instr->value());
   Register scratch = scratch0();
   Register reg = ToRegister(instr->result());
   Register temp1 = ToRegister(instr->temp());
   Register temp2 = ToRegister(instr->temp2());

+  bool convert_hole = false;
+  HValue* change_input = instr->hydrogen()->value();
+  if (change_input->IsLoadKeyed()) {
+    HLoadKeyed* load = HLoadKeyed::cast(change_input);
+    convert_hole = load->UsesMustHandleHole();
+  }
+
+  Label no_special_nan_handling;
+  Label done;
+  if (convert_hole) {
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      DoubleRegister input_reg = ToDoubleRegister(instr->value());
+      __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg);
+      __ Move(reg, scratch0(), input_reg);
+      Label canonicalize;
+      __ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
+      __ li(reg, factory()->the_hole_value());
+      __ Branch(&done);
+      __ bind(&canonicalize);
+      __ Move(input_reg,
+              FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+    } else {
+      Label not_hole;
+      __ Branch(&not_hole, ne, sfpd_hi, Operand(kHoleNanUpper32));
+      __ li(reg, factory()->the_hole_value());
+      __ Branch(&done);
+      __ bind(&not_hole);
+      __ And(scratch, sfpd_hi, Operand(0x7ff00000));
+      __ Branch(&no_special_nan_handling, ne, scratch, Operand(0x7ff00000));
+      Label special_nan_handling;
+      __ And(at, sfpd_hi, Operand(0x000FFFFF));
+      __ Branch(&special_nan_handling, ne, at, Operand(zero_reg));
+      __ Branch(&no_special_nan_handling, eq, sfpd_lo, Operand(zero_reg));
+      __ bind(&special_nan_handling);
+      double canonical_nan =
+          FixedDoubleArray::canonical_not_the_hole_nan_as_double();
+      uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
+      __ li(sfpd_lo,
+            Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
+      __ li(sfpd_hi,
+            Operand(static_cast<uint32_t>(casted_nan >> 32)));
+    }
+  }
+
+  __ bind(&no_special_nan_handling);
   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   if (FLAG_inline_new) {
     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
     // We want the untagged address first for performance
     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                           DONT_TAG_RESULT);
   } else {
     __ Branch(deferred->entry());
   }
   __ bind(deferred->exit());
   if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
     __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
   } else {
     __ sw(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
     __ sw(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
   }
   // Now that we have finished with the object's real address tag it
   __ Addu(reg, reg, kHeapObjectTag);
+  __ bind(&done);
 }

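The convert_hole block distinguishes the hole sentinel from ordinary NaNs by
its upper 32 bits. A rough sketch of the FPU branch, assuming the hole is
encoded as the NaN pattern whose high word is kHoleNanUpper32 (not V8 code):

    // if (value == value)                       ordinary number: box it below
    // else if (hi32(value) == kHoleNanUpper32)  result = the_hole_value, done
    // else                                      rewrite value as the canonical
    //                                           non-hole NaN, then box it

The non-FPU branch performs the same classification on the sfpd_hi/sfpd_lo
word pair, checking the exponent bits (0x7ff00000) and the mantissa to decide
whether the value is a NaN at all before canonicalizing it.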


 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
   Register reg = ToRegister(instr->result());
   __ mov(reg, zero_reg);

(...skipping 23 matching lines...)
   } else {
     __ SmiUntag(result, input);
   }
 }


 void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 DoubleRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 bool deoptimize_on_minus_zero,
-                                LEnvironment* env) {
+                                LEnvironment* env,
+                                NumberUntagDMode mode) {
   Register scratch = scratch0();
   CpuFeatures::Scope scope(FPU);

   Label load_smi, heap_number, done;

-  // Smi check.
-  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
-  // Heap number map check.
-  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  if (deoptimize_on_undefined) {
-    DeoptimizeIf(ne, env, scratch, Operand(at));
-  } else {
-    Label heap_number;
-    __ Branch(&heap_number, eq, scratch, Operand(at));
-
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    DeoptimizeIf(ne, env, input_reg, Operand(at));
-
-    // Convert undefined to NaN.
-    __ LoadRoot(at, Heap::kNanValueRootIndex);
-    __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
-    __ Branch(&done);
-
-    __ bind(&heap_number);
-  }
-  // Heap number to double register conversion.
-  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
-  if (deoptimize_on_minus_zero) {
-    __ mfc1(at, result_reg.low());
-    __ Branch(&done, ne, at, Operand(zero_reg));
-    __ mfc1(scratch, result_reg.high());
-    DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
-  }
-  __ Branch(&done);
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+
+    // Heap number map check.
+    __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    if (deoptimize_on_undefined) {
+      DeoptimizeIf(ne, env, scratch, Operand(at));
+    } else {
+      Label heap_number;
+      __ Branch(&heap_number, eq, scratch, Operand(at));
+
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(ne, env, input_reg, Operand(at));
+
+      // Convert undefined to NaN.
+      __ LoadRoot(at, Heap::kNanValueRootIndex);
+      __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+      __ Branch(&done);
+
+      __ bind(&heap_number);
+    }
+    // Heap number to double register conversion.
+    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
+      __ mfc1(at, result_reg.low());
+      __ Branch(&done, ne, at, Operand(zero_reg));
+      __ mfc1(scratch, result_reg.high());
+      DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+    }
+    __ Branch(&done);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+    __ SmiUntag(scratch, input_reg);
+    DeoptimizeIf(Ugreater_equal, env, scratch, Operand(zero_reg));
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+    __ Move(result_reg,
+            FixedDoubleArray::hole_nan_as_double());
+    __ Branch(&done);
+  } else {
+    __ SmiUntag(scratch, input_reg);
+    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+  }

   // Smi to double register conversion
   __ bind(&load_smi);
   // scratch: untagged value of input_reg
   __ mtc1(scratch, result_reg);
   __ cvt_d_w(result_reg, result_reg);
   __ bind(&done);
 }


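For reference, the four NumberUntagDMode values this helper now distinguishes
(the enum is presumably declared in the accompanying lithium-codegen-mips.h
change; the names are from the code above, the summaries are ours):

    NUMBER_CANDIDATE_IS_ANY_TAGGED        full smi check plus heap-number map
                                          check, with undefined -> NaN handling
    NUMBER_CANDIDATE_IS_SMI               known smi: untag and convert
    NUMBER_CANDIDATE_IS_SMI_OR_HOLE       smi expected; otherwise deoptimize
    NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE  smi or hole; a non-smi loads as the
                                          hole NaN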
(...skipping 106 matching lines...)

 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
   LOperand* result = instr->result();
   ASSERT(result->IsDoubleRegister());

   Register input_reg = ToRegister(input);
   DoubleRegister result_reg = ToDoubleRegister(result);

+  NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+  HValue* value = instr->hydrogen()->value();
+  if (value->type().IsSmi()) {
+    if (value->IsLoadKeyed()) {
+      HLoadKeyed* load = HLoadKeyed::cast(value);
+      if (load->UsesMustHandleHole()) {
+        if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+          mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+        } else {
+          mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+        }
+      } else {
+        mode = NUMBER_CANDIDATE_IS_SMI;
+      }
+    }
+  }
+
   EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
                    instr->hydrogen()->deoptimize_on_minus_zero(),
-                   instr->environment());
+                   instr->environment(),
+                   mode);
 }
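Flattening the mode selection above (our paraphrase): a smi-typed LoadKeyed
that must handle holes maps to NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE when its
hole_mode() is ALLOW_RETURN_HOLE and to NUMBER_CANDIDATE_IS_SMI_OR_HOLE
otherwise; a smi-typed LoadKeyed with no hole handling is plain
NUMBER_CANDIDATE_IS_SMI; everything else stays NUMBER_CANDIDATE_IS_ANY_TAGGED.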


 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   Register result_reg = ToRegister(instr->result());
   Register scratch1 = scratch0();
   Register scratch2 = ToRegister(instr->temp());
   DoubleRegister double_input = ToDoubleRegister(instr->value());

   if (instr->truncating()) {
(...skipping 273 matching lines...)
   __ mov(result, zero_reg);

   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   __ li(a0, Operand(Smi::FromInt(instance_size)));
   __ push(a0);
   CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
   __ StoreToSafepointRegisterSlot(v0, result);
 }


+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate: public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
+
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  HAllocate* original_instr = instr->hydrogen();
+  if (original_instr->size()->IsConstant()) {
+    UNREACHABLE();
+  } else {
+    // Allocate memory for the object.
+    AllocationFlags flags = TAG_OBJECT;
+    if (original_instr->MustAllocateDoubleAligned()) {
+      flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+    }
+    __ AllocateInNewSpace(size,
+                          result,
+                          scratch,
+                          scratch2,
+                          deferred->entry(),
+                          TAG_OBJECT);
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ SmiTag(size, size);
+  __ push(size);
+  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
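Two details worth noting in the new allocation code: DoDeferredAllocate
smi-tags the size register before pushing it, since Runtime::kAllocateInNewSpace
evidently expects its argument as a smi; and in DoAllocate the computed
"flags" value (which can include DOUBLE_ALIGNMENT) is never passed on, because
the AllocateInNewSpace call hard-codes TAG_OBJECT, which looks like an
oversight a reviewer might flag.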
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   Handle<FixedArray> literals(instr->environment()->closure()->literals());
   ElementsKind boilerplate_elements_kind =
       instr->hydrogen()->boilerplate_elements_kind();
   AllocationSiteMode allocation_site_mode =
       instr->hydrogen()->allocation_site_mode();

   // Deopt if the array literal boilerplate ElementsKind is of a type different
   // than the expected one. The check isn't necessary if the boilerplate has
   // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
(...skipping 706 matching lines...)
   __ Subu(scratch, result, scratch);
   __ lw(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
   __ bind(&done);
 }


 #undef __

 } }  // namespace v8::internal
