Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(381)

Side by Side Diff: src/ia32/lithium-codegen-ia32.cc

Issue 78283002: Restore saved caller FP registers on stub failure (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Port to ia32, arm, and mips Created 7 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/ia32/lithium-codegen-ia32.h ('k') | src/ia32/macro-assembler-ia32.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after
#ifdef _MSC_VER
// Touches one word on each stack page in [0, offset) below the current esp.
// NOTE(review): presumably needed so Windows commits its stack guard pages in
// order before a large downward adjustment of esp — confirm against MSVC
// stack-probing (_chkstk) semantics.
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  int probe = offset - kPageSize;
  while (probe > 0) {
    __ mov(Operand(esp, probe), eax);
    probe -= kPageSize;
  }
}
#endif
131 131
132 132
133 void LCodeGen::SaveCallerDoubles() {
134 ASSERT(info()->saves_caller_doubles());
135 ASSERT(NeedsEagerFrame());
136 Comment(";;; Save clobbered callee double registers");
137 CpuFeatureScope scope(masm(), SSE2);
138 int count = 0;
139 BitVector* doubles = chunk()->allocated_double_registers();
140 BitVector::Iterator save_iterator(doubles);
141 while (!save_iterator.Done()) {
142 __ movsd(MemOperand(esp, count * kDoubleSize),
143 XMMRegister::FromAllocationIndex(save_iterator.Current()));
144 save_iterator.Advance();
145 count++;
146 }
147 }
148
149
150 void LCodeGen::RestoreCallerDoubles() {
151 ASSERT(info()->saves_caller_doubles());
152 ASSERT(NeedsEagerFrame());
153 Comment(";;; Restore clobbered callee double registers");
154 CpuFeatureScope scope(masm(), SSE2);
155 BitVector* doubles = chunk()->allocated_double_registers();
156 BitVector::Iterator save_iterator(doubles);
157 int count = 0;
158 while (!save_iterator.Done()) {
159 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
160 MemOperand(esp, count * kDoubleSize));
161 save_iterator.Advance();
162 count++;
163 }
164 }
165
166
133 bool LCodeGen::GeneratePrologue() { 167 bool LCodeGen::GeneratePrologue() {
134 ASSERT(is_generating()); 168 ASSERT(is_generating());
135 169
136 if (info()->IsOptimizing()) { 170 if (info()->IsOptimizing()) {
137 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 171 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
138 172
139 #ifdef DEBUG 173 #ifdef DEBUG
140 if (strlen(FLAG_stop_at) > 0 && 174 if (strlen(FLAG_stop_at) > 0 &&
141 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { 175 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
142 __ int3(); 176 __ int3();
(...skipping 94 matching lines...) Expand 10 before | Expand all | Expand 10 after
237 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; 271 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
238 if (dynamic_frame_alignment_) { 272 if (dynamic_frame_alignment_) {
239 __ mov(Operand(ebp, offset), edx); 273 __ mov(Operand(ebp, offset), edx);
240 } else { 274 } else {
241 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); 275 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
242 } 276 }
243 } 277 }
244 } 278 }
245 279
246 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { 280 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
247 Comment(";;; Save clobbered callee double registers"); 281 SaveCallerDoubles();
248 CpuFeatureScope scope(masm(), SSE2);
249 int count = 0;
250 BitVector* doubles = chunk()->allocated_double_registers();
251 BitVector::Iterator save_iterator(doubles);
252 while (!save_iterator.Done()) {
253 __ movsd(MemOperand(esp, count * kDoubleSize),
254 XMMRegister::FromAllocationIndex(save_iterator.Current()));
255 save_iterator.Advance();
256 count++;
257 }
258 } 282 }
259 } 283 }
260 284
261 // Possibly allocate a local context. 285 // Possibly allocate a local context.
262 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 286 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
263 if (heap_slots > 0) { 287 if (heap_slots > 0) {
264 Comment(";;; Allocate local context"); 288 Comment(";;; Allocate local context");
265 // Argument to NewContext is the function, which is still in edi. 289 // Argument to NewContext is the function, which is still in edi.
266 __ push(edi); 290 __ push(edi);
267 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 291 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
(...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after
392 __ bind(&jump_table_[i].label); 416 __ bind(&jump_table_[i].label);
393 Address entry = jump_table_[i].address; 417 Address entry = jump_table_[i].address;
394 Deoptimizer::BailoutType type = jump_table_[i].bailout_type; 418 Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
395 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); 419 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
396 if (id == Deoptimizer::kNotDeoptimizationEntry) { 420 if (id == Deoptimizer::kNotDeoptimizationEntry) {
397 Comment(";;; jump table entry %d.", i); 421 Comment(";;; jump table entry %d.", i);
398 } else { 422 } else {
399 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); 423 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
400 } 424 }
401 if (jump_table_[i].needs_frame) { 425 if (jump_table_[i].needs_frame) {
426 ASSERT(!info()->saves_caller_doubles());
402 __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); 427 __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
403 if (needs_frame.is_bound()) { 428 if (needs_frame.is_bound()) {
404 __ jmp(&needs_frame); 429 __ jmp(&needs_frame);
405 } else { 430 } else {
406 __ bind(&needs_frame); 431 __ bind(&needs_frame);
407 __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset)); 432 __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
408 // This variant of deopt can only be used with stubs. Since we don't 433 // This variant of deopt can only be used with stubs. Since we don't
409 // have a function pointer to install in the stack frame that we're 434 // have a function pointer to install in the stack frame that we're
410 // building, install a special marker there instead. 435 // building, install a special marker there instead.
411 ASSERT(info()->IsStub()); 436 ASSERT(info()->IsStub());
412 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); 437 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
413 // Push a PC inside the function so that the deopt code can find where 438 // Push a PC inside the function so that the deopt code can find where
414 // the deopt comes from. It doesn't have to be the precise return 439 // the deopt comes from. It doesn't have to be the precise return
415 // address of a "calling" LAZY deopt, it only has to be somewhere 440 // address of a "calling" LAZY deopt, it only has to be somewhere
416 // inside the code body. 441 // inside the code body.
417 Label push_approx_pc; 442 Label push_approx_pc;
418 __ call(&push_approx_pc); 443 __ call(&push_approx_pc);
419 __ bind(&push_approx_pc); 444 __ bind(&push_approx_pc);
420 // Push the continuation which was stashed where the ebp should 445 // Push the continuation which was stashed where the ebp should
421 // be. Replace it with the saved ebp. 446 // be. Replace it with the saved ebp.
422 __ push(MemOperand(esp, 3 * kPointerSize)); 447 __ push(MemOperand(esp, 3 * kPointerSize));
423 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); 448 __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
424 __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); 449 __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
425 __ ret(0); // Call the continuation without clobbering registers. 450 __ ret(0); // Call the continuation without clobbering registers.
426 } 451 }
427 } else { 452 } else {
453 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
454 RestoreCallerDoubles();
455 }
428 __ call(entry, RelocInfo::RUNTIME_ENTRY); 456 __ call(entry, RelocInfo::RUNTIME_ENTRY);
429 } 457 }
430 } 458 }
431 return !is_aborted(); 459 return !is_aborted();
432 } 460 }
433 461
434 462
435 bool LCodeGen::GenerateDeferredCode() { 463 bool LCodeGen::GenerateDeferredCode() {
436 ASSERT(is_generating()); 464 ASSERT(is_generating());
437 if (deferred_.length() > 0) { 465 if (deferred_.length() > 0) {
(...skipping 2714 matching lines...) Expand 10 before | Expand all | Expand 10 after
3152 if (FLAG_trace && info()->IsOptimizing()) { 3180 if (FLAG_trace && info()->IsOptimizing()) {
3153 // Preserve the return value on the stack and rely on the runtime call 3181 // Preserve the return value on the stack and rely on the runtime call
3154 // to return the value in the same register. We're leaving the code 3182 // to return the value in the same register. We're leaving the code
3155 // managed by the register allocator and tearing down the frame, it's 3183 // managed by the register allocator and tearing down the frame, it's
3156 // safe to write to the context register. 3184 // safe to write to the context register.
3157 __ push(eax); 3185 __ push(eax);
3158 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 3186 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3159 __ CallRuntime(Runtime::kTraceExit, 1); 3187 __ CallRuntime(Runtime::kTraceExit, 1);
3160 } 3188 }
3161 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { 3189 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
3162 ASSERT(NeedsEagerFrame()); 3190 RestoreCallerDoubles();
3163 CpuFeatureScope scope(masm(), SSE2);
3164 BitVector* doubles = chunk()->allocated_double_registers();
3165 BitVector::Iterator save_iterator(doubles);
3166 int count = 0;
3167 while (!save_iterator.Done()) {
3168 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
3169 MemOperand(esp, count * kDoubleSize));
3170 save_iterator.Advance();
3171 count++;
3172 }
3173 } 3191 }
3174 if (dynamic_frame_alignment_) { 3192 if (dynamic_frame_alignment_) {
3175 // Fetch the state of the dynamic frame alignment. 3193 // Fetch the state of the dynamic frame alignment.
3176 __ mov(edx, Operand(ebp, 3194 __ mov(edx, Operand(ebp,
3177 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); 3195 JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
3178 } 3196 }
3179 int no_frame_start = -1; 3197 int no_frame_start = -1;
3180 if (NeedsEagerFrame()) { 3198 if (NeedsEagerFrame()) {
3181 __ mov(esp, ebp); 3199 __ mov(esp, ebp);
3182 __ pop(ebp); 3200 __ pop(ebp);
(...skipping 3323 matching lines...) Expand 10 before | Expand all | Expand 10 after
6506 FixedArray::kHeaderSize - kPointerSize)); 6524 FixedArray::kHeaderSize - kPointerSize));
6507 __ bind(&done); 6525 __ bind(&done);
6508 } 6526 }
6509 6527
6510 6528
6511 #undef __ 6529 #undef __
6512 6530
6513 } } // namespace v8::internal 6531 } } // namespace v8::internal
6514 6532
6515 #endif // V8_TARGET_ARCH_IA32 6533 #endif // V8_TARGET_ARCH_IA32
OLDNEW
« no previous file with comments | « src/ia32/lithium-codegen-ia32.h ('k') | src/ia32/macro-assembler-ia32.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698