Index: third_party/tcmalloc/chromium/src/windows/preamble_patcher.cc
diff --git a/third_party/tcmalloc/chromium/src/windows/preamble_patcher.cc b/third_party/tcmalloc/chromium/src/windows/preamble_patcher.cc
index 78a47639feee0c6066323eccda4bd18b3e100f38..b27a95bcb6f737f16d32e74c8243bb31c6810668 100644
--- a/third_party/tcmalloc/chromium/src/windows/preamble_patcher.cc
+++ b/third_party/tcmalloc/chromium/src/windows/preamble_patcher.cc
@@ -29,6 +29,7 @@
  *
  * ---
  * Author: Joi Sigurdsson
+ * Author: Scott Francis
  *
  * Implementation of PreamblePatcher
  */
@@ -46,18 +47,42 @@
 #define ASM_JMP32ABS_0 0xFF
 #define ASM_JMP32ABS_1 0x25
 #define ASM_JMP8REL 0xEB
+#define ASM_JCC32REL_0 0x0F
+#define ASM_JCC32REL_1_MASK 0x80
+#define ASM_NOP 0x90
+// X64 opcodes
+#define ASM_REXW 0x48
+#define ASM_MOVRAX_IMM 0xB8
+#define ASM_JMP 0xFF
+#define ASM_JMP_RAX 0xE0
 
 namespace sidestep {
 
+PreamblePatcher::PreamblePage* PreamblePatcher::preamble_pages_ = NULL;
+long PreamblePatcher::granularity_ = 0;
+long PreamblePatcher::pagesize_ = 0;
+bool PreamblePatcher::initialized_ = false;
+
+static const unsigned int kPreamblePageMagic = 0x4347414D;  // "MAGC"
+
 // Handle a special case that we see with functions that point into an
 // IAT table (including functions linked statically into the
 // application): these function already starts with ASM_JMP32*. For
 // instance, malloc() might be implemented as a JMP to __malloc().
 // This function follows the initial JMPs for us, until we get to the
 // place where the actual code is defined. If we get to STOP_BEFORE,
-// we return the address before stop_before.
+// we return the address before stop_before. The stop_before_trampoline
+// flag is used in 64-bit mode. If true, we will return the address
+// before a trampoline is detected. Trampolines are defined as:
+//
+//    nop
+//    mov rax, <replacement_function>
+//    jmp rax
+//
+// See PreamblePatcher::RawPatchWithStub for more information.
 void* PreamblePatcher::ResolveTargetImpl(unsigned char* target,
-                                         unsigned char* stop_before) {
+                                         unsigned char* stop_before,
+                                         bool stop_before_trampoline) {
   if (target == NULL)
     return NULL;
   while (1) {
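
The trampoline the comment above describes is thirteen bytes: nop (0x90), mov rax, imm64 (0x48 0xB8 + eight immediate bytes), and jmp rax (0xFF 0xE0). As a minimal sketch of how the new opcode macros spell that sequence out (EmitTrampoline is a hypothetical helper for illustration, not part of the patch):

    #include <string.h>

    // Sketch only: lay down "nop; mov rax, imm64; jmp rax" (13 bytes).
    void EmitTrampoline(unsigned char* buf, void* replacement_function) {
      buf[0] = ASM_NOP;                           // 0x90
      buf[1] = ASM_REXW;                          // 0x48: REX.W prefix
      buf[2] = ASM_MOVRAX_IMM;                    // 0xB8: mov rax, imm64
      memcpy(buf + 3, &replacement_function, 8);  // 64-bit absolute address
      buf[11] = ASM_JMP;                          // 0xFF
      buf[12] = ASM_JMP_RAX;                      // 0xE0: ModRM byte for rax
    }

The detection added to ResolveTargetImpl below only needs the first three bytes (NOP, REX.W, MOV RAX) to recognize such a trampoline.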
@@ -81,15 +106,26 @@ void* PreamblePatcher::ResolveTargetImpl(unsigned char* target,
       // Visual studio seems to sometimes do it this way instead of the
       // previous way. Not sure what the rules are, but it was happening
       // with operator new in some binaries.
-      void **new_target_v;
-      SIDESTEP_ASSERT(sizeof(new_target) == 4);
-      memcpy(&new_target_v, reinterpret_cast<void*>(target + 2), 4);
+      void** new_target_v;
+      if (kIs64BitBinary) {
+        // In 64-bit mode JMPs are RIP-relative, not absolute
+        int target_offset;
+        memcpy(reinterpret_cast<void*>(&target_offset),
+               reinterpret_cast<void*>(target + 2), 4);
+        new_target_v = reinterpret_cast<void**>(target + target_offset + 6);
+      } else {
+        SIDESTEP_ASSERT(sizeof(new_target) == 4);
+        memcpy(&new_target_v, reinterpret_cast<void*>(target + 2), 4);
+      }
       new_target = reinterpret_cast<unsigned char*>(*new_target_v);
     } else {
       break;
     }
     if (new_target == stop_before)
       break;
+    if (stop_before_trampoline && *new_target == ASM_NOP
+        && new_target[1] == ASM_REXW && new_target[2] == ASM_MOVRAX_IMM)
+      break;
     target = new_target;
   }
   return target;
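
In the 64-bit branch above, FF 25 encodes jmp qword ptr [rip+disp32]: the four displacement bytes are relative to the end of the six-byte instruction, so the pointer slot lives at target + disp32 + 6, while the 32-bit form stores an absolute four-byte address at target + 2. The same decoding as a standalone sketch (FollowRipRelativeJmp is a hypothetical name; the patch does this arithmetic inline):

    #include <string.h>

    // Sketch: given 'instr' pointing at "FF 25 disp32", return the address
    // the jump ultimately transfers control to.
    unsigned char* FollowRipRelativeJmp(unsigned char* instr) {
      int disp32;
      memcpy(&disp32, instr + 2, 4);  // displacement follows the FF 25 opcode
      // RIP points at the next instruction, i.e. instr + 6.
      void** slot = reinterpret_cast<void**>(instr + 6 + disp32);
      return reinterpret_cast<unsigned char*>(*slot);
    }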
@@ -103,7 +139,7 @@ class DeleteUnsignedCharArray {
 
   ~DeleteUnsignedCharArray() {
     if (array_) {
-      delete [] array_;
+      PreamblePatcher::FreePreambleBlock(array_);
     }
   }
 
@@ -191,9 +227,23 @@ SideStepError PreamblePatcher::RawPatch(void* target_function,
     return SIDESTEP_INVALID_PARAMETER;
   }
 
-  // @see MAX_PREAMBLE_STUB_SIZE for an explanation of how we arrives at
-  // this size
-  unsigned char* preamble_stub = new unsigned char[MAX_PREAMBLE_STUB_SIZE];
+  BOOL succeeded = FALSE;
+
+  // First, deal with a special case that we see with functions that
+  // point into an IAT table (including functions linked statically
+  // into the application): these functions already start with
+  // ASM_JMP32REL. For instance, malloc() might be implemented as a
+  // JMP to __malloc(). In that case, we replace the destination of
+  // the JMP (__malloc), rather than the JMP itself (malloc). This
+  // way we get the correct behavior no matter how malloc gets called.
+  void* new_target = ResolveTarget(target_function);
+  if (new_target != target_function) {
+    target_function = new_target;
+  }
+
+  // In 64-bit mode, preamble_stub must be within 2GB of target function
+  // so that if target contains a jump, we can translate it.
+  unsigned char* preamble_stub = AllocPreambleBlockNear(target_function);
   if (!preamble_stub) {
     SIDESTEP_ASSERT(false && "Unable to allocate preamble-stub.");
     return SIDESTEP_INSUFFICIENT_BUFFER;
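
The stub must be near the target because any rel32 jump or call copied out of the preamble keeps only a 32-bit displacement; after the instruction moves into the stub, the recomputed displacement must still fit in a signed 32-bit integer. A sketch of that reachability condition (IsRel32Reachable is a hypothetical name; the real checks live in AllocPreambleBlockNear and the Patch* helpers added below):

    #include <limits.h>

    // Sketch: true if a rel32 displacement written at 'from' can reach 'to'.
    bool IsRel32Reachable(const void* from, const void* to) {
      __int64 delta = reinterpret_cast<__int64>(to) -
                      reinterpret_cast<__int64>(from);
      return delta >= INT_MIN && delta <= INT_MAX;
    }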
@@ -202,19 +252,6 @@ SideStepError PreamblePatcher::RawPatch(void* target_function,
   // Frees the array at end of scope.
   DeleteUnsignedCharArray guard_preamble_stub(preamble_stub);
 
-  // Change the protection of the newly allocated preamble stub to
-  // PAGE_EXECUTE_READWRITE. This is required to work with DEP (Data
-  // Execution Prevention) which will cause an exception if code is executed
-  // from a page on which you do not have read access.
-  DWORD old_stub_protect = 0;
-  BOOL succeeded = ::VirtualProtect(preamble_stub, MAX_PREAMBLE_STUB_SIZE,
-                                    PAGE_EXECUTE_READWRITE, &old_stub_protect);
-  if (!succeeded) {
-    SIDESTEP_ASSERT(false &&
-                    "Failed to make page preamble stub read-write-execute.");
-    return SIDESTEP_ACCESS_DENIED;
-  }
-
   SideStepError error_code = RawPatchWithStubAndProtections(
       target_function, replacement_function, preamble_stub,
       MAX_PREAMBLE_STUB_SIZE, NULL);
@@ -260,23 +297,6 @@ SideStepError PreamblePatcher::Unpatch(void* target_function,
     return SIDESTEP_INVALID_PARAMETER;
   }
 
-  // We disassemble the preamble of the _stub_ to see how many bytes we
-  // originally copied to the stub.
-  MiniDisassembler disassembler;
-  unsigned int preamble_bytes = 0;
-  while (preamble_bytes < 5) {
-    InstructionType instruction_type =
-        disassembler.Disassemble(
-            reinterpret_cast<unsigned char*>(original_function_stub) +
-                preamble_bytes,
-            preamble_bytes);
-    if (IT_GENERIC != instruction_type) {
-      SIDESTEP_ASSERT(false &&
-                      "Should only have generic instructions in stub!!");
-      return SIDESTEP_UNSUPPORTED_INSTRUCTION;
-    }
-  }
-
   // Before unpatching, target_function should be a JMP to
   // replacement_function. If it's not, then either it's an error, or
   // we're falling into the case where the original instruction was a
@@ -286,7 +306,8 @@ SideStepError PreamblePatcher::Unpatch(void* target_function,
   unsigned char* target = reinterpret_cast<unsigned char*>(target_function);
   target = reinterpret_cast<unsigned char*>(
       ResolveTargetImpl(
-          target, reinterpret_cast<unsigned char*>(replacement_function)));
+          target, reinterpret_cast<unsigned char*>(replacement_function),
+          true));
   // We should end at the function we patched. When we patch, we insert
   // a ASM_JMP32REL instruction, so look for that as a sanity check.
   if (target[0] != ASM_JMP32REL) {
@@ -295,11 +316,13 @@ SideStepError PreamblePatcher::Unpatch(void* target_function,
     return SIDESTEP_INVALID_PARAMETER;
   }
 
+  const unsigned int kRequiredTargetPatchBytes = 5;
+
   // We need to be able to write to a process-local copy of the first
-  // MAX_PREAMBLE_STUB_SIZE bytes of target_function
+  // kRequiredTargetPatchBytes bytes of target_function
   DWORD old_target_function_protect = 0;
-  BOOL succeeded = ::VirtualProtect(reinterpret_cast<void*>(target_function),
-                                    MAX_PREAMBLE_STUB_SIZE,
+  BOOL succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
+                                    kRequiredTargetPatchBytes,
                                     PAGE_EXECUTE_READWRITE,
                                     &old_target_function_protect);
   if (!succeeded) {
@@ -308,20 +331,67 @@ SideStepError PreamblePatcher::Unpatch(void* target_function,
     return SIDESTEP_ACCESS_DENIED;
   }
 
-  // Replace the first few bytes of the original function with the bytes we
-  // previously moved to the preamble stub.
-  memcpy(reinterpret_cast<void*>(target),
-         original_function_stub, preamble_bytes);
+  unsigned char* preamble_stub = reinterpret_cast<unsigned char*>(
+      original_function_stub);
 
-  // Stub is now useless so delete it.
-  // [csilvers: Commented out for perftools because it causes big problems
-  // when we're unpatching malloc. We just let this live on as a leak.]
-  //delete [] reinterpret_cast<unsigned char*>(original_function_stub);
+  // Disassemble the preamble of the stub and copy the bytes back to the
+  // target. If we've done any conditional jumps in the preamble we need to
+  // convert them back to the original REL8 jumps in the target.
+  MiniDisassembler disassembler;
+  unsigned int preamble_bytes = 0;
+  unsigned int target_bytes = 0;
+  while (target_bytes < kRequiredTargetPatchBytes) {
+    unsigned int cur_bytes = 0;
+    InstructionType instruction_type =
+        disassembler.Disassemble(preamble_stub + preamble_bytes, cur_bytes);
+    if (IT_JUMP == instruction_type) {
+      unsigned int jump_bytes = 0;
+      SideStepError jump_ret = SIDESTEP_JUMP_INSTRUCTION;
+      if (IsNearConditionalJump(preamble_stub + preamble_bytes, cur_bytes) ||
+          IsNearRelativeJump(preamble_stub + preamble_bytes, cur_bytes) ||
+          IsNearAbsoluteCall(preamble_stub + preamble_bytes, cur_bytes) ||
+          IsNearRelativeCall(preamble_stub + preamble_bytes, cur_bytes)) {
+        jump_ret = PatchNearJumpOrCall(preamble_stub + preamble_bytes,
+                                       cur_bytes, target + target_bytes,
+                                       &jump_bytes, MAX_PREAMBLE_STUB_SIZE);
+      }
+      if (jump_ret == SIDESTEP_JUMP_INSTRUCTION) {
+        SIDESTEP_ASSERT(false &&
+                        "Found unsupported jump instruction in stub!!");
+        return SIDESTEP_UNSUPPORTED_INSTRUCTION;
+      }
+      target_bytes += jump_bytes;
+    } else if (IT_GENERIC == instruction_type) {
+      if (IsMovWithDisplacement(preamble_stub + preamble_bytes, cur_bytes)) {
+        unsigned int mov_bytes = 0;
+        if (PatchMovWithDisplacement(preamble_stub + preamble_bytes, cur_bytes,
+                                     target + target_bytes, &mov_bytes,
+                                     MAX_PREAMBLE_STUB_SIZE)
+            != SIDESTEP_SUCCESS) {
+          SIDESTEP_ASSERT(false &&
+                          "Found unsupported generic instruction in stub!!");
+          return SIDESTEP_UNSUPPORTED_INSTRUCTION;
+        }
+        }
+        target_bytes += mov_bytes;
+      } else {
+        memcpy(reinterpret_cast<void*>(target + target_bytes),
+               reinterpret_cast<void*>(reinterpret_cast<unsigned char*>(
+                   original_function_stub) + preamble_bytes), cur_bytes);
+        target_bytes += cur_bytes;
+      }
+    } else {
+      SIDESTEP_ASSERT(false &&
+                      "Found unsupported instruction in stub!!");
+      return SIDESTEP_UNSUPPORTED_INSTRUCTION;
+    }
+    preamble_bytes += cur_bytes;
+  }
 
-  // Restore the protection of the first MAX_PREAMBLE_STUB_SIZE bytes of
+  FreePreambleBlock(reinterpret_cast<unsigned char*>(original_function_stub));
+
+  // Restore the protection of the first kRequiredTargetPatchBytes bytes of
   // target to what they were before we started goofing around.
   succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
-                               MAX_PREAMBLE_STUB_SIZE,
+                               kRequiredTargetPatchBytes,
                                old_target_function_protect,
                                &old_target_function_protect);
 
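The loop above reverses the translations made at patch time: each instruction is disassembled out of the stub and rewritten at its original address, with PatchNearJumpOrCall shrinking a widened jump back down when the new displacement fits in a byte. The core of that shrink, sketched for the conditional case: the six-byte 0F 8x rel32 form and the two-byte 7x rel8 form share the condition-code nibble x, so only the opcode byte and the offset width change (ShrinkNearJcc is a hypothetical helper; the real logic is in PatchNearJumpOrCall below):

    // Sketch: rewrite "0F 8x rel32" as "7x rel8" once new_offset fits in a
    // signed byte, e.g. "0F 84 xx xx xx xx" (je near) becomes "74 xx".
    void ShrinkNearJcc(const unsigned char* near_jcc, signed char new_offset,
                       unsigned char* out) {
      out[0] = 0x70 | (near_jcc[1] & 0x0F);  // carry the condition nibble over
      out[1] = static_cast<unsigned char>(new_offset);
    }
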
@@ -341,4 +411,274 @@ SideStepError PreamblePatcher::Unpatch(void* target_function,
   return SIDESTEP_SUCCESS;
 }
 
+void PreamblePatcher::Initialize() {
+  if (!initialized_) {
+    SYSTEM_INFO si = { 0 };
+    ::GetSystemInfo(&si);
+    granularity_ = si.dwAllocationGranularity;
+    pagesize_ = si.dwPageSize;
+    initialized_ = true;
+  }
+}
+
+unsigned char* PreamblePatcher::AllocPreambleBlockNear(void* target) {
+  PreamblePage* preamble_page = preamble_pages_;
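+  // Only reuse an existing page if it lies within +/-2GB of the target,
+  // so that rel32 displacements between stub and target stay in range.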
+  while (preamble_page != NULL) {
+    if (preamble_page->free_ != NULL) {
+      __int64 val = reinterpret_cast<__int64>(preamble_page) -
+          reinterpret_cast<__int64>(target);
+      if ((val > 0 && val + pagesize_ <= INT_MAX) ||
+          (val < 0 && val >= INT_MIN)) {
+        break;
+      }
+    }
+    preamble_page = preamble_page->next_;
+  }
+
+  // The free_ member of the page is used to store the next available block
+  // of memory to use or NULL if there are no chunks available, in which case
+  // we'll allocate a new page.
+  if (preamble_page == NULL || preamble_page->free_ == NULL) {
+    // Create a new preamble page and initialize the free list
+    preamble_page = reinterpret_cast<PreamblePage*>(AllocPageNear(target));
+    SIDESTEP_ASSERT(preamble_page != NULL && "Could not allocate page!");
+    void** pp = &preamble_page->free_;
+    unsigned char* ptr = reinterpret_cast<unsigned char*>(preamble_page) +
+        MAX_PREAMBLE_STUB_SIZE;
+    unsigned char* limit = reinterpret_cast<unsigned char*>(preamble_page) +
+        pagesize_;
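+    // Carve the rest of the page into MAX_PREAMBLE_STUB_SIZE chunks (the
+    // first chunk is reserved for the PreamblePage header), threading the
+    // free list through the first pointer-sized slot of each chunk.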
+    while (ptr < limit) {
+      *pp = ptr;
+      pp = reinterpret_cast<void**>(ptr);
+      ptr += MAX_PREAMBLE_STUB_SIZE;
+    }
+    *pp = NULL;
+    // Insert the new page into the list
+    preamble_page->magic_ = kPreamblePageMagic;
+    preamble_page->next_ = preamble_pages_;
+    preamble_pages_ = preamble_page;
+  }
+  unsigned char* ret = reinterpret_cast<unsigned char*>(preamble_page->free_);
+  preamble_page->free_ = *(reinterpret_cast<void**>(preamble_page->free_));
+  return ret;
+}
+
+void PreamblePatcher::FreePreambleBlock(unsigned char* block) {
+  SIDESTEP_ASSERT(block != NULL);
+  SIDESTEP_ASSERT(granularity_ != 0);
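+  // Mask off the low bits to round down to the allocation granularity,
+  // recovering the PreamblePage header at the start of the owning page.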
+  uintptr_t ptr = reinterpret_cast<uintptr_t>(block);
+  ptr -= ptr & (granularity_ - 1);
+  PreamblePage* preamble_page = reinterpret_cast<PreamblePage*>(ptr);
+  SIDESTEP_ASSERT(preamble_page->magic_ == kPreamblePageMagic);
+  *(reinterpret_cast<void**>(block)) = preamble_page->free_;
+  preamble_page->free_ = block;
+}
+
+void* PreamblePatcher::AllocPageNear(void* target) {
+  MEMORY_BASIC_INFORMATION mbi = { 0 };
+  if (!::VirtualQuery(target, &mbi, sizeof(mbi))) {
+    SIDESTEP_ASSERT(false && "VirtualQuery failed on target address");
+    return 0;
+  }
+  if (initialized_ == false) {
+    PreamblePatcher::Initialize();
+    SIDESTEP_ASSERT(initialized_);
+  }
+  void* pv = NULL;
+  unsigned char* allocation_base = reinterpret_cast<unsigned char*>(
+      mbi.AllocationBase);
+  __int64 i = 1;
+  bool high_target = reinterpret_cast<__int64>(target) > UINT_MAX;
+  while (pv == NULL) {
+    __int64 val = reinterpret_cast<__int64>(allocation_base) -
+        (i * granularity_);
+    if (high_target &&
+        reinterpret_cast<__int64>(target) - val > INT_MAX) {
+      // We're further than 2GB from the target
+      break;
+    } else if (val <= 0) {
+      // Ran off the bottom of the address space
+      break;
+    }
+    pv = ::VirtualAlloc(reinterpret_cast<void*>(allocation_base -
+                            (i++ * granularity_)),
+                        pagesize_, MEM_COMMIT | MEM_RESERVE,
+                        PAGE_EXECUTE_READWRITE);
+  }
+
+  // We couldn't allocate low, try to allocate high
+  if (pv == NULL) {
+    i = 1;
+    // Round up to the next multiple of page granularity
+    allocation_base = reinterpret_cast<unsigned char*>(
+        (reinterpret_cast<__int64>(target) &
+            (~(granularity_ - 1))) + granularity_);
+    while (pv == NULL) {
+      __int64 val = reinterpret_cast<__int64>(allocation_base) +
+          (i * granularity_) - reinterpret_cast<__int64>(target);
+      if (val > INT_MAX || val < 0) {
+        // We're too far or we overflowed
+        break;
+      }
+      pv = ::VirtualAlloc(reinterpret_cast<void*>(allocation_base +
+                              (i++ * granularity_)),
+                          pagesize_, MEM_COMMIT | MEM_RESERVE,
+                          PAGE_EXECUTE_READWRITE);
+    }
+  }
+  return pv;
+}
+
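+// Jcc rel8: opcode 0x70-0x7F followed by a signed 8-bit offset (2 bytes).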
+bool PreamblePatcher::IsShortConditionalJump(
+    unsigned char* target,
+    unsigned int instruction_size) {
+  return (*(target) & 0x70) == 0x70 && instruction_size == 2;
+}
+
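+// Jcc rel32: 0x0F 0x80-0x8F followed by a 32-bit offset (6 bytes).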
+bool PreamblePatcher::IsNearConditionalJump(
+    unsigned char* target,
+    unsigned int instruction_size) {
+  return *(target) == 0xf && (*(target + 1) & 0x80) == 0x80 &&
+      instruction_size == 6;
+}
+
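+// JMP rel32: 0xE9 followed by a 32-bit offset (5 bytes).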
+bool PreamblePatcher::IsNearRelativeJump(
+    unsigned char* target,
+    unsigned int instruction_size) {
+  return *(target) == 0xe9 && instruction_size == 5;
+}
+
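+// Indirect CALL (0xFF with a ModRM /2 byte) and a 32-bit operand (6 bytes).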
+bool PreamblePatcher::IsNearAbsoluteCall(
+    unsigned char* target,
+    unsigned int instruction_size) {
+  return *(target) == 0xff && (*(target + 1) & 0x10) == 0x10 &&
+      instruction_size == 6;
+}
+
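+// CALL rel32: 0xE8 followed by a 32-bit offset (5 bytes).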
+bool PreamblePatcher::IsNearRelativeCall(
+    unsigned char* target,
+    unsigned int instruction_size) {
+  return *(target) == 0xe8 && instruction_size == 5;
+}
+
+bool PreamblePatcher::IsMovWithDisplacement(
+    unsigned char* target,
+    unsigned int instruction_size) {
+  // In this case, the ModRM byte's mod field will be 0 and r/m will be 101b (5)
+  return instruction_size == 7 && *target == 0x48 && *(target + 1) == 0x8b &&
+      (*(target + 2) >> 6) == 0 && (*(target + 2) & 0x7) == 5;
+}
+
+SideStepError PreamblePatcher::PatchShortConditionalJump(
+    unsigned char* source,
+    unsigned int instruction_size,
+    unsigned char* target,
+    unsigned int* target_bytes,
+    unsigned int target_size) {
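+  // The rel8 operand is relative to the first byte after the two-byte
+  // instruction, hence the source + 2 base below.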
+  unsigned char* original_jump_dest = (source + 2) + source[1];
+  unsigned char* stub_jump_from = target + 6;
+  __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
+  if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
+    SIDESTEP_ASSERT(false &&
+                    "Unable to fix up short jump because target"
+                    " is too far away.");
+    return SIDESTEP_JUMP_INSTRUCTION;
+  }
+
+  *target_bytes = 6;
+  if (target_size > *target_bytes) {
+    // Convert the short jump to a near jump.
+    //
+    // 0f 8x xx xx xx xx = Jcc rel32off
+    unsigned short jmpcode = ((0x80 | (source[0] & 0xf)) << 8) | 0x0f;
+    memcpy(reinterpret_cast<void*>(target),
+           reinterpret_cast<void*>(&jmpcode), 2);
+    memcpy(reinterpret_cast<void*>(target + 2),
+           reinterpret_cast<void*>(&fixup_jump_offset), 4);
+  }
+
+  return SIDESTEP_SUCCESS;
+}
+
+SideStepError PreamblePatcher::PatchNearJumpOrCall(
+    unsigned char* source,
+    unsigned int instruction_size,
+    unsigned char* target,
+    unsigned int* target_bytes,
+    unsigned int target_size) {
+  SIDESTEP_ASSERT(instruction_size == 5 || instruction_size == 6);
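+  // Five-byte forms (E8/E9) carry the rel32 at offset 1; six-byte forms
+  // (0F 8x and FF /2) carry it at offset 2.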
+  unsigned int jmp_offset_in_instruction = instruction_size == 5 ? 1 : 2;
+  unsigned char* original_jump_dest = reinterpret_cast<unsigned char*>(
+      reinterpret_cast<__int64>(source + instruction_size) +
+      *(reinterpret_cast<int*>(source + jmp_offset_in_instruction)));
+  unsigned char* stub_jump_from = target + instruction_size;
+  __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
+  if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
+    SIDESTEP_ASSERT(false &&
+                    "Unable to fix up near jump because target"
+                    " is too far away.");
+    return SIDESTEP_JUMP_INSTRUCTION;
+  }
+
+  if (fixup_jump_offset < SCHAR_MAX && fixup_jump_offset > SCHAR_MIN) {
+    *target_bytes = 2;
+    if (target_size > *target_bytes) {
+      // If the new offset is in range, use a short jump instead of a near jump.
+      if (source[0] == ASM_JCC32REL_0 &&
+          (source[1] & ASM_JCC32REL_1_MASK) == ASM_JCC32REL_1_MASK) {
+        unsigned short jmpcode = (static_cast<unsigned char>(
+            fixup_jump_offset) << 8) | (0x70 | (source[1] & 0xf));
+        memcpy(reinterpret_cast<void*>(target),
+               reinterpret_cast<void*>(&jmpcode),
+               2);
+      } else {
+        target[0] = ASM_JMP8REL;
+        target[1] = static_cast<unsigned char>(fixup_jump_offset);
+      }
+    }
+  } else {
+    *target_bytes = instruction_size;
+    if (target_size > *target_bytes) {
+      memcpy(reinterpret_cast<void*>(target),
+             reinterpret_cast<void*>(source),
+             jmp_offset_in_instruction);
+      memcpy(reinterpret_cast<void*>(target + jmp_offset_in_instruction),
+             reinterpret_cast<void*>(&fixup_jump_offset),
+             4);
+    }
+  }
+
+  return SIDESTEP_SUCCESS;
+}
+
+SideStepError PreamblePatcher::PatchMovWithDisplacement(
+    unsigned char* source,
+    unsigned int instruction_size,
+    unsigned char* target,
+    unsigned int* target_bytes,
+    unsigned int target_size) {
+  SIDESTEP_ASSERT(instruction_size == 7);
+  const int mov_offset_in_instruction = 3;  // 0x48 0x8b 0x0d <offset>
+  unsigned char* original_mov_dest = reinterpret_cast<unsigned char*>(
+      reinterpret_cast<__int64>(source + instruction_size) +
+      *(reinterpret_cast<int*>(source + mov_offset_in_instruction)));
+  unsigned char* stub_mov_from = target + instruction_size;
+  __int64 fixup_mov_offset = original_mov_dest - stub_mov_from;
+  if (fixup_mov_offset > INT_MAX || fixup_mov_offset < INT_MIN) {
+    SIDESTEP_ASSERT(false &&
+        "Unable to fix up near MOV because target is too far away.");
+    return SIDESTEP_UNEXPECTED;
+  }
+  *target_bytes = instruction_size;
+  if (target_size > *target_bytes) {
+    memcpy(reinterpret_cast<void*>(target),
+           reinterpret_cast<void*>(source),
+           mov_offset_in_instruction);
+    memcpy(reinterpret_cast<void*>(target + mov_offset_in_instruction),
+           reinterpret_cast<void*>(&fixup_mov_offset),
+           4);
+  }
+  return SIDESTEP_SUCCESS;
+}
+
 };  // namespace sidestep