Index: runtime/vm/assembler_x64.cc
diff --git a/runtime/vm/assembler_x64.cc b/runtime/vm/assembler_x64.cc
index bc213eb74b1a16b3f3945b80d9bcb3a2e6847705..cd4a47648382c9811f22dd4105a45932a3d1cf45 100644
--- a/runtime/vm/assembler_x64.cc
+++ b/runtime/vm/assembler_x64.cc
@@ -1693,15 +1693,29 @@ static const Register volatile_cpu_registers[kNumberOfVolatileCpuRegisters] = {
   RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11
 };
+static const intptr_t kNumberOfVolatileRegisters =
+    kNumberOfVolatileCpuRegisters + (kNumberOfXmmRegisters - 1);
srdjan (2012/08/30 17:59:24): ditto
Vyacheslav Egorov (Google) (2012/09/21 12:51:52): Done.

+
 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
   enter(Immediate(0));
-  // Preserve volatile registers.
+  // Preserve volatile CPU registers.
   for (intptr_t i = 0; i < kNumberOfVolatileCpuRegisters; i++) {
     pushq(volatile_cpu_registers[i]);
   }
+  // Preserve all XMM registers except XMM0.
+  subq(RSP, Immediate((kNumberOfXmmRegisters - 1) * kDoubleSize));
+  // Store XMM registers with the lowest register number at the lowest
+  // address.
+  intptr_t offset = 0;
+  for (intptr_t reg_idx = 1; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
+    XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
+    movsd(Address(RSP, offset), xmm_reg);
+    offset += kDoubleSize;
+  }
+
   ReserveAlignedFrameSpace(frame_space);
 }
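
For concreteness, here is a minimal standalone sketch (not part of the patch, not VM code) of the save-area layout this hunk produces. It assumes kNumberOfXmmRegisters == 16 (XMM0..XMM15) and kDoubleSize == 8, both of which hold for the x64 port:

#include <cstdio>

int main() {
  const int kNumberOfXmmRegisters = 16;  // XMM0..XMM15 on x64.
  const int kDoubleSize = 8;             // Bytes per saved double.
  // EnterCallRuntimeFrame reserves one slot per XMM register except XMM0.
  const int save_area = (kNumberOfXmmRegisters - 1) * kDoubleSize;
  std::printf("XMM save area: %d bytes\n", save_area);  // Prints 120.
  // Lowest register number at the lowest address, matching the store loop:
  for (int reg = 1; reg < kNumberOfXmmRegisters; ++reg) {
    std::printf("XMM%d -> [RSP + %d]\n", reg, (reg - 1) * kDoubleSize);
  }
  return 0;
}
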
@@ -1710,9 +1724,19 @@ void Assembler::LeaveCallRuntimeFrame() {
   // RSP might have been modified to reserve space for arguments
   // and ensure proper alignment of the stack frame.
   // We need to restore it before restoring registers.
-  leaq(RSP, Address(RBP, -kNumberOfVolatileCpuRegisters * kWordSize));
+  leaq(RSP, Address(RBP, -kNumberOfVolatileRegisters * kWordSize));
+
+  // Restore all XMM registers except XMM0.
+  // XMM registers have the lowest register number at the lowest address.
+  intptr_t offset = 0;
+  for (intptr_t reg_idx = 1; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
+    XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
+    movsd(xmm_reg, Address(RSP, offset));
+    offset += kDoubleSize;
+  }
+  addq(RSP, Immediate(offset));
-  // Restore volatile registers.
+  // Restore volatile CPU registers.
   for (intptr_t i = kNumberOfVolatileCpuRegisters - 1; i >= 0; i--) {
     popq(volatile_cpu_registers[i]);
  }
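
One subtlety worth calling out: the single leaq lands at the bottom of the XMM save area only because a CPU word and a saved double are both 8 bytes on x64. A small sketch (illustrative, not VM code; the constants mirror the ones in the patch, with kNumberOfVolatileCpuRegisters = 9 taken from the register array above) that checks this invariant:

#include <cassert>

int main() {
  const int kWordSize = 8;    // x64 CPU word.
  const int kDoubleSize = 8;  // One saved XMM slot.
  const int kNumberOfVolatileCpuRegisters = 9;  // RAX..R11, per the array.
  const int kNumberOfXmmRegisters = 16;         // XMM0..XMM15.
  const int kNumberOfVolatileRegisters =
      kNumberOfVolatileCpuRegisters + (kNumberOfXmmRegisters - 1);
  // Bytes placed below RBP by EnterCallRuntimeFrame before
  // ReserveAlignedFrameSpace: 9 CPU pushes plus 15 XMM slots.
  const int pushed = kNumberOfVolatileCpuRegisters * kWordSize +
                     (kNumberOfXmmRegisters - 1) * kDoubleSize;
  // LeaveCallRuntimeFrame's leaq computes RBP minus this value; the
  // equality holds only while kWordSize == kDoubleSize.
  assert(kNumberOfVolatileRegisters * kWordSize == pushed);
  return 0;
}
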