Chromium Code Reviews

Side by Side Diff: third_party/tcmalloc/chromium/src/windows/preamble_patcher.cc

Issue 9667026: Revert 126020 - Experiment for updating the tcmalloc chromium branch to r144 (gperftools 2.0). (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Created 8 years, 9 months ago
1 /* Copyright (c) 2007, Google Inc. 1 /* Copyright (c) 2007, Google Inc.
2 * All rights reserved. 2 * All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 11 matching lines...)
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * 29 *
30 * --- 30 * ---
31 * Author: Joi Sigurdsson 31 * Author: Joi Sigurdsson
32 * Author: Scott Francis
33 * 32 *
34 * Implementation of PreamblePatcher 33 * Implementation of PreamblePatcher
35 */ 34 */
36 35
37 #include "preamble_patcher.h" 36 #include "preamble_patcher.h"
38 37
39 #include "mini_disassembler.h" 38 #include "mini_disassembler.h"
40 39
41 // compatibility shims 40 // compatibility shims
42 #include "base/logging.h" 41 #include "base/logging.h"
43 42
44 // Definitions of assembly statements we need 43 // Definitions of assembly statements we need
45 #define ASM_JMP32REL 0xE9 44 #define ASM_JMP32REL 0xE9
46 #define ASM_INT3 0xCC 45 #define ASM_INT3 0xCC
47 #define ASM_JMP32ABS_0 0xFF 46 #define ASM_JMP32ABS_0 0xFF
48 #define ASM_JMP32ABS_1 0x25 47 #define ASM_JMP32ABS_1 0x25
49 #define ASM_JMP8REL 0xEB 48 #define ASM_JMP8REL 0xEB
50 #define ASM_JCC32REL_0 0x0F
51 #define ASM_JCC32REL_1_MASK 0x80
52 #define ASM_NOP 0x90
53 // X64 opcodes
54 #define ASM_REXW 0x48
55 #define ASM_MOVRAX_IMM 0xB8
56 #define ASM_JMP 0xFF
57 #define ASM_JMP_RAX 0xE0
58 49
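The ASM_* constants above are raw x86 opcode bytes. A minimal sketch of how they compose into the 5-byte "jmp rel32" the patcher writes over a function preamble, assuming a writable buffer within +/-2GB of the destination (the helper name is illustrative, not part of this patch):

  #include <cstring>

  // Sketch only: emit "jmp rel32" (0xE9 dd dd dd dd) at 'from', landing at
  // 'to'. The displacement is counted from the first byte AFTER the
  // instruction, hence from + 5.
  void EmitJmp32Rel(unsigned char* from, const unsigned char* to) {
    int displacement = static_cast<int>(to - (from + 5));
    from[0] = 0xE9;                           // ASM_JMP32REL
    std::memcpy(from + 1, &displacement, 4);  // little-endian rel32
  }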
59 namespace sidestep { 50 namespace sidestep {
60 51
61 PreamblePatcher::PreamblePage* PreamblePatcher::preamble_pages_ = NULL;
62 long PreamblePatcher::granularity_ = 0;
63 long PreamblePatcher::pagesize_ = 0;
64 bool PreamblePatcher::initialized_ = false;
65
66 static const unsigned int kPreamblePageMagic = 0x4347414D; // "MAGC"
67
68 // Handle a special case that we see with functions that point into an 52 // Handle a special case that we see with functions that point into an
69 // IAT table (including functions linked statically into the 53 // IAT table (including functions linked statically into the
70 // application): these functions already start with ASM_JMP32*. For 54 // application): these functions already start with ASM_JMP32*. For
71 // instance, malloc() might be implemented as a JMP to __malloc(). 55 // instance, malloc() might be implemented as a JMP to __malloc().
72 // This function follows the initial JMPs for us, until we get to the 56 // This function follows the initial JMPs for us, until we get to the
73 // place where the actual code is defined. If we get to STOP_BEFORE, 57 // place where the actual code is defined. If we get to STOP_BEFORE,
74 // we return the address before stop_before. The stop_before_trampoline 58 // we return the address before stop_before.
75 // flag is used in 64-bit mode. If true, we will return the address
76 // before a trampoline is detected. Trampolines are defined as:
77 //
78 // nop
79 // mov rax, <replacement_function>
80 // jmp rax
81 //
82 // See PreamblePatcher::RawPatchWithStub for more information.
83 void* PreamblePatcher::ResolveTargetImpl(unsigned char* target, 59 void* PreamblePatcher::ResolveTargetImpl(unsigned char* target,
84 unsigned char* stop_before, 60 unsigned char* stop_before) {
85 bool stop_before_trampoline) {
86 if (target == NULL) 61 if (target == NULL)
87 return NULL; 62 return NULL;
88 while (1) { 63 while (1) {
89 unsigned char* new_target; 64 unsigned char* new_target;
90 if (target[0] == ASM_JMP32REL) { 65 if (target[0] == ASM_JMP32REL) {
91 // target[1-4] holds the place the jmp goes to, but it's 66 // target[1-4] holds the place the jmp goes to, but it's
92 // relative to the next instruction. 67 // relative to the next instruction.
93 int relative_offset; // Windows guarantees int is 4 bytes 68 int relative_offset; // Windows guarantees int is 4 bytes
94 SIDESTEP_ASSERT(sizeof(relative_offset) == 4); 69 SIDESTEP_ASSERT(sizeof(relative_offset) == 4);
95 memcpy(reinterpret_cast<void*>(&relative_offset), 70 memcpy(reinterpret_cast<void*>(&relative_offset),
96 reinterpret_cast<void*>(target + 1), 4); 71 reinterpret_cast<void*>(target + 1), 4);
97 new_target = target + 5 + relative_offset; 72 new_target = target + 5 + relative_offset;
98 } else if (target[0] == ASM_JMP8REL) { 73 } else if (target[0] == ASM_JMP8REL) {
99 // Visual Studio 7.1 implements new[] as an 8 bit jump to new 74 // Visual Studio 7.1 implements new[] as an 8 bit jump to new
100 signed char relative_offset; 75 signed char relative_offset;
101 memcpy(reinterpret_cast<void*>(&relative_offset), 76 memcpy(reinterpret_cast<void*>(&relative_offset),
102 reinterpret_cast<void*>(target + 1), 1); 77 reinterpret_cast<void*>(target + 1), 1);
103 new_target = target + 2 + relative_offset; 78 new_target = target + 2 + relative_offset;
104 } else if (target[0] == ASM_JMP32ABS_0 && 79 } else if (target[0] == ASM_JMP32ABS_0 &&
105 target[1] == ASM_JMP32ABS_1) { 80 target[1] == ASM_JMP32ABS_1) {
106 // Visual Studio seems to sometimes do it this way instead of the 81 // Visual Studio seems to sometimes do it this way instead of the
107 // previous way. Not sure what the rules are, but it was happening 82 // previous way. Not sure what the rules are, but it was happening
108 // with operator new in some binaries. 83 // with operator new in some binaries.
109 void** new_target_v; 84 void **new_target_v;
110 if (kIs64BitBinary) { 85 SIDESTEP_ASSERT(sizeof(new_target) == 4);
111 // In 64-bit mode JMPs are RIP-relative, not absolute 86 memcpy(&new_target_v, reinterpret_cast<void*>(target + 2), 4);
112 int target_offset;
113 memcpy(reinterpret_cast<void*>(&target_offset),
114 reinterpret_cast<void*>(target + 2), 4);
115 new_target_v = reinterpret_cast<void**>(target + target_offset + 6);
116 } else {
117 SIDESTEP_ASSERT(sizeof(new_target) == 4);
118 memcpy(&new_target_v, reinterpret_cast<void*>(target + 2), 4);
119 }
120 new_target = reinterpret_cast<unsigned char*>(*new_target_v); 87 new_target = reinterpret_cast<unsigned char*>(*new_target_v);
121 } else { 88 } else {
122 break; 89 break;
123 } 90 }
124 if (new_target == stop_before) 91 if (new_target == stop_before)
125 break; 92 break;
126 if (stop_before_trampoline && *new_target == ASM_NOP
127 && new_target[1] == ASM_REXW && new_target[2] == ASM_MOVRAX_IMM)
128 break;
129 target = new_target; 93 target = new_target;
130 } 94 }
131 return target; 95 return target;
132 } 96 }
133 97
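The loop above chases JMPs until it reaches real code. The same idea on a synthetic buffer, to make the displacement arithmetic concrete (sketch only; just the 0xE9 form is handled here):

  #include <cstdio>
  #include <cstring>

  // Keep decoding "jmp rel32" (0xE9) and hop to the destination until a
  // non-jump byte is reached.
  unsigned char* FollowJmp32Chain(unsigned char* p) {
    while (p[0] == 0xE9) {          // ASM_JMP32REL
      int rel;
      std::memcpy(&rel, p + 1, 4);  // rel32 is relative to the next instruction
      p = p + 5 + rel;
    }
    return p;
  }

  int main() {
    unsigned char buf[32] = {0};
    buf[0] = 0xE9;                  // jmp +5: lands at buf[10]
    int rel = 5;
    std::memcpy(&buf[1], &rel, 4);
    buf[10] = 0xC3;                 // ret: end of chain
    std::printf("resolved offset: %d\n",
                static_cast<int>(FollowJmp32Chain(buf) - buf));  // prints 10
    return 0;
  }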
134 // Special-case scoped_ptr here to avoid a dependency on scoped_ptr below. 98 // Special-case scoped_ptr here to avoid a dependency on scoped_ptr below.
135 class DeleteUnsignedCharArray { 99 class DeleteUnsignedCharArray {
136 public: 100 public:
137 DeleteUnsignedCharArray(unsigned char* array) : array_(array) { 101 DeleteUnsignedCharArray(unsigned char* array) : array_(array) {
138 } 102 }
139 103
140 ~DeleteUnsignedCharArray() { 104 ~DeleteUnsignedCharArray() {
141 if (array_) { 105 if (array_) {
142 PreamblePatcher::FreePreambleBlock(array_); 106 delete [] array_;
143 } 107 }
144 } 108 }
145 109
146 unsigned char* Release() { 110 unsigned char* Release() {
147 unsigned char* temp = array_; 111 unsigned char* temp = array_;
148 array_ = NULL; 112 array_ = NULL;
149 return temp; 113 return temp;
150 } 114 }
151 115
152 private: 116 private:
(...skipping 67 matching lines...)
220 184
221 SideStepError PreamblePatcher::RawPatch(void* target_function, 185 SideStepError PreamblePatcher::RawPatch(void* target_function,
222 void* replacement_function, 186 void* replacement_function,
223 void** original_function_stub) { 187 void** original_function_stub) {
224 if (!target_function || !replacement_function || !original_function_stub || 188 if (!target_function || !replacement_function || !original_function_stub ||
225 (*original_function_stub) || target_function == replacement_function) { 189 (*original_function_stub) || target_function == replacement_function) {
226 SIDESTEP_ASSERT(false && "Preconditions not met"); 190 SIDESTEP_ASSERT(false && "Preconditions not met");
227 return SIDESTEP_INVALID_PARAMETER; 191 return SIDESTEP_INVALID_PARAMETER;
228 } 192 }
229 193
230 BOOL succeeded = FALSE; 194 // @see MAX_PREAMBLE_STUB_SIZE for an explanation of how we arrive at
231 195 // this size
232 // First, deal with a special case that we see with functions that 196 unsigned char* preamble_stub = new unsigned char[MAX_PREAMBLE_STUB_SIZE];
233 // point into an IAT table (including functions linked statically
234 // into the application): these function already starts with
235 // ASM_JMP32REL. For instance, malloc() might be implemented as a
236 // JMP to __malloc(). In that case, we replace the destination of
237 // the JMP (__malloc), rather than the JMP itself (malloc). This
238 // way we get the correct behavior no matter how malloc gets called.
239 void* new_target = ResolveTarget(target_function);
240 if (new_target != target_function) {
241 target_function = new_target;
242 }
243
244 // In 64-bit mode, preamble_stub must be within 2GB of target function
245 // so that if target contains a jump, we can translate it.
246 unsigned char* preamble_stub = AllocPreambleBlockNear(target_function);
247 if (!preamble_stub) { 197 if (!preamble_stub) {
248 SIDESTEP_ASSERT(false && "Unable to allocate preamble-stub."); 198 SIDESTEP_ASSERT(false && "Unable to allocate preamble-stub.");
249 return SIDESTEP_INSUFFICIENT_BUFFER; 199 return SIDESTEP_INSUFFICIENT_BUFFER;
250 } 200 }
251 201
252 // Frees the array at end of scope. 202 // Frees the array at end of scope.
253 DeleteUnsignedCharArray guard_preamble_stub(preamble_stub); 203 DeleteUnsignedCharArray guard_preamble_stub(preamble_stub);
254 204
205 // Change the protection of the newly allocated preamble stub to
206 // PAGE_EXECUTE_READWRITE. This is required to work with DEP (Data
207 // Execution Prevention) which will cause an exception if code is executed
208 // from a page on which you do not have read access.
209 DWORD old_stub_protect = 0;
210 BOOL succeeded = ::VirtualProtect(preamble_stub, MAX_PREAMBLE_STUB_SIZE,
211 PAGE_EXECUTE_READWRITE, &old_stub_protect);
212 if (!succeeded) {
213 SIDESTEP_ASSERT(false &&
214 "Failed to make page preamble stub read-write-execute.");
215 return SIDESTEP_ACCESS_DENIED;
216 }
217
255 SideStepError error_code = RawPatchWithStubAndProtections( 218 SideStepError error_code = RawPatchWithStubAndProtections(
256 target_function, replacement_function, preamble_stub, 219 target_function, replacement_function, preamble_stub,
257 MAX_PREAMBLE_STUB_SIZE, NULL); 220 MAX_PREAMBLE_STUB_SIZE, NULL);
258 221
259 if (SIDESTEP_SUCCESS != error_code) { 222 if (SIDESTEP_SUCCESS != error_code) {
260 SIDESTEP_ASSERT(false); 223 SIDESTEP_ASSERT(false);
261 return error_code; 224 return error_code;
262 } 225 }
263 226
264 // Flush the instruction cache to make sure the processor doesn't execute the 227 // Flush the instruction cache to make sure the processor doesn't execute the
(...skipping 25 matching lines...)
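The DEP rationale spelled out in the right-hand column applies to any code region the patcher writes: it must be made PAGE_EXECUTE_READWRITE before mutation, and the instruction cache flushed afterwards. A Windows-only sketch of that protect/mutate/flush pattern (helper names are illustrative, not part of this patch):

  #include <windows.h>

  // Sketch only: open a code region for patching under DEP.
  bool MakePatchable(void* code, SIZE_T size, DWORD* old_protect) {
    return ::VirtualProtect(code, size, PAGE_EXECUTE_READWRITE,
                            old_protect) != FALSE;
  }

  // Sketch only: restore the original protection, then flush the
  // instruction cache so stale bytes are not executed.
  bool FinishPatch(void* code, SIZE_T size, DWORD old_protect) {
    DWORD ignored = 0;
    if (!::VirtualProtect(code, size, old_protect, &ignored))
      return false;
    return ::FlushInstructionCache(::GetCurrentProcess(), code,
                                   size) != FALSE;
  }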
290 SideStepError PreamblePatcher::Unpatch(void* target_function, 253 SideStepError PreamblePatcher::Unpatch(void* target_function,
291 void* replacement_function, 254 void* replacement_function,
292 void* original_function_stub) { 255 void* original_function_stub) {
293 SIDESTEP_ASSERT(target_function && replacement_function && 256 SIDESTEP_ASSERT(target_function && replacement_function &&
294 original_function_stub); 257 original_function_stub);
295 if (!target_function || !replacement_function || 258 if (!target_function || !replacement_function ||
296 !original_function_stub) { 259 !original_function_stub) {
297 return SIDESTEP_INVALID_PARAMETER; 260 return SIDESTEP_INVALID_PARAMETER;
298 } 261 }
299 262
263 // We disassemble the preamble of the _stub_ to see how many bytes we
264 // originally copied to the stub.
265 MiniDisassembler disassembler;
266 unsigned int preamble_bytes = 0;
267 while (preamble_bytes < 5) {
268 InstructionType instruction_type =
269 disassembler.Disassemble(
270 reinterpret_cast<unsigned char*>(original_function_stub) +
271 preamble_bytes,
272 preamble_bytes);
273 if (IT_GENERIC != instruction_type) {
274 SIDESTEP_ASSERT(false &&
275 "Should only have generic instructions in stub!!");
276 return SIDESTEP_UNSUPPORTED_INSTRUCTION;
277 }
278 }
279
300 // Before unpatching, target_function should be a JMP to 280 // Before unpatching, target_function should be a JMP to
301 // replacement_function. If it's not, then either it's an error, or 281 // replacement_function. If it's not, then either it's an error, or
302 // we're falling into the case where the original instruction was a 282 // we're falling into the case where the original instruction was a
303 // JMP, and we patched the jumped_to address rather than the JMP 283 // JMP, and we patched the jumped_to address rather than the JMP
304 // itself. (For instance, if malloc() is just a JMP to __malloc(), 284 // itself. (For instance, if malloc() is just a JMP to __malloc(),
305 // we patched __malloc() and not malloc().) 285 // we patched __malloc() and not malloc().)
306 unsigned char* target = reinterpret_cast<unsigned char*>(target_function); 286 unsigned char* target = reinterpret_cast<unsigned char*>(target_function);
307 target = reinterpret_cast<unsigned char*>( 287 target = reinterpret_cast<unsigned char*>(
308 ResolveTargetImpl( 288 ResolveTargetImpl(
309 target, reinterpret_cast<unsigned char*>(replacement_function), 289 target, reinterpret_cast<unsigned char*>(replacement_function)));
310 true));
311 // We should end at the function we patched. When we patch, we insert 290 // We should end at the function we patched. When we patch, we insert
312 // a ASM_JMP32REL instruction, so look for that as a sanity check. 291 // a ASM_JMP32REL instruction, so look for that as a sanity check.
313 if (target[0] != ASM_JMP32REL) { 292 if (target[0] != ASM_JMP32REL) {
314 SIDESTEP_ASSERT(false && 293 SIDESTEP_ASSERT(false &&
315 "target_function does not look like it was patched."); 294 "target_function does not look like it was patched.");
316 return SIDESTEP_INVALID_PARAMETER; 295 return SIDESTEP_INVALID_PARAMETER;
317 } 296 }
318 297
319 const unsigned int kRequiredTargetPatchBytes = 5;
320
321 // We need to be able to write to a process-local copy of the first 298 // We need to be able to write to a process-local copy of the first
322 // kRequiredTargetPatchBytes bytes of target_function 299 // MAX_PREAMBLE_STUB_SIZE bytes of target_function
323 DWORD old_target_function_protect = 0; 300 DWORD old_target_function_protect = 0;
324 BOOL succeeded = ::VirtualProtect(reinterpret_cast<void*>(target), 301 BOOL succeeded = ::VirtualProtect(reinterpret_cast<void*>(target_function),
325 kRequiredTargetPatchBytes, 302 MAX_PREAMBLE_STUB_SIZE,
326 PAGE_EXECUTE_READWRITE, 303 PAGE_EXECUTE_READWRITE,
327 &old_target_function_protect); 304 &old_target_function_protect);
328 if (!succeeded) { 305 if (!succeeded) {
329 SIDESTEP_ASSERT(false && "Failed to make page containing target function " 306 SIDESTEP_ASSERT(false && "Failed to make page containing target function "
330 "copy-on-write."); 307 "copy-on-write.");
331 return SIDESTEP_ACCESS_DENIED; 308 return SIDESTEP_ACCESS_DENIED;
332 } 309 }
333 310
334 unsigned char* preamble_stub = reinterpret_cast<unsigned char*>( 311 // Replace the first few bytes of the original function with the bytes we
335 original_function_stub); 312 // previously moved to the preamble stub.
313 memcpy(reinterpret_cast<void*>(target),
314 original_function_stub, preamble_bytes);
336 315
337 // Disassemble the preamble of the stub and copy the bytes back to the 316 // Stub is now useless so delete it.
338 // target. If we rewrote any conditional jumps into the preamble, we 317 // [csilvers: Commented out for perftools because it causes big problems
339 // need to convert them back to the original REL8 jumps in the target. 318 // when we're unpatching malloc. We just let this live on as a leak.]
340 MiniDisassembler disassembler; 319 //delete [] reinterpret_cast<unsigned char*>(original_function_stub);
341 unsigned int preamble_bytes = 0;
342 unsigned int target_bytes = 0;
343 while (target_bytes < kRequiredTargetPatchBytes) {
344 unsigned int cur_bytes = 0;
345 InstructionType instruction_type =
346 disassembler.Disassemble(preamble_stub + preamble_bytes, cur_bytes);
347 if (IT_JUMP == instruction_type) {
348 unsigned int jump_bytes = 0;
349 SideStepError jump_ret = SIDESTEP_JUMP_INSTRUCTION;
350 if (IsNearConditionalJump(preamble_stub + preamble_bytes, cur_bytes) ||
351 IsNearRelativeJump(preamble_stub + preamble_bytes, cur_bytes) ||
352 IsNearAbsoluteCall(preamble_stub + preamble_bytes, cur_bytes) ||
353 IsNearRelativeCall(preamble_stub + preamble_bytes, cur_bytes)) {
354 jump_ret = PatchNearJumpOrCall(preamble_stub + preamble_bytes,
355 cur_bytes, target + target_bytes,
356 &jump_bytes, MAX_PREAMBLE_STUB_SIZE);
357 }
358 if (jump_ret == SIDESTEP_JUMP_INSTRUCTION) {
359 SIDESTEP_ASSERT(false &&
360 "Found unsupported jump instruction in stub!!");
361 return SIDESTEP_UNSUPPORTED_INSTRUCTION;
362 }
363 target_bytes += jump_bytes;
364 } else if (IT_GENERIC == instruction_type) {
365 if (IsMovWithDisplacement(preamble_stub + preamble_bytes, cur_bytes)) {
366 unsigned int mov_bytes = 0;
367 if (PatchMovWithDisplacement(preamble_stub + preamble_bytes, cur_bytes,
368 target + target_bytes, &mov_bytes,
369 MAX_PREAMBLE_STUB_SIZE)
370 != SIDESTEP_SUCCESS) {
371 SIDESTEP_ASSERT(false &&
372 "Found unsupported generic instruction in stub!!");
373 return SIDESTEP_UNSUPPORTED_INSTRUCTION;
374 }
375 } else {
376 memcpy(reinterpret_cast<void*>(target + target_bytes),
377 reinterpret_cast<void*>(reinterpret_cast<unsigned char*>(
378 original_function_stub) + preamble_bytes), cur_bytes);
379 target_bytes += cur_bytes;
380 }
381 } else {
382 SIDESTEP_ASSERT(false &&
383 "Found unsupported instruction in stub!!");
384 return SIDESTEP_UNSUPPORTED_INSTRUCTION;
385 }
386 preamble_bytes += cur_bytes;
387 }
388 320
389 FreePreambleBlock(reinterpret_cast<unsigned char*>(original_function_stub)); 321 // Restore the protection of the first MAX_PREAMBLE_STUB_SIZE bytes of
390
391 // Restore the protection of the first kRequiredTargetPatchBytes bytes of
392 // target to what they were before we started goofing around. 322 // target to what they were before we started goofing around.
393 succeeded = ::VirtualProtect(reinterpret_cast<void*>(target), 323 succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
394 kRequiredTargetPatchBytes, 324 MAX_PREAMBLE_STUB_SIZE,
395 old_target_function_protect, 325 old_target_function_protect,
396 &old_target_function_protect); 326 &old_target_function_protect);
397 327
398 // Flush the instruction cache to make sure the processor doesn't execute the 328 // Flush the instruction cache to make sure the processor doesn't execute the
399 // old version of the instructions (before our patch). 329 // old version of the instructions (before our patch).
400 // 330 //
401 // See comment on FlushInstructionCache elsewhere in this file. 331 // See comment on FlushInstructionCache elsewhere in this file.
402 succeeded = ::FlushInstructionCache(::GetCurrentProcess(), 332 succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
403 target, 333 target,
404 MAX_PREAMBLE_STUB_SIZE); 334 MAX_PREAMBLE_STUB_SIZE);
405 if (!succeeded) { 335 if (!succeeded) {
406 SIDESTEP_ASSERT(false && "Failed to flush instruction cache."); 336 SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
407 return SIDESTEP_UNEXPECTED; 337 return SIDESTEP_UNEXPECTED;
408 } 338 }
409 339
410 SIDESTEP_LOG("PreamblePatcher::Unpatch successfully unpatched."); 340 SIDESTEP_LOG("PreamblePatcher::Unpatch successfully unpatched.");
411 return SIDESTEP_SUCCESS; 341 return SIDESTEP_SUCCESS;
412 } 342 }
413 343
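Both versions of Unpatch rely on the same invariant: the stub holds whole instructions, and at least 5 bytes' worth must be copied back over the patch JMP. A sketch of that length computation, with a hypothetical InstructionLength() standing in for MiniDisassembler::Disassemble():

  #include <cstddef>

  // Hypothetical decoder (a real one would wrap MiniDisassembler):
  // returns the length in bytes of the instruction at 'p', or 0 on failure.
  size_t InstructionLength(const unsigned char* p);

  // Sketch only: count how many bytes of whole instructions cover the
  // 5 bytes clobbered by the patch "jmp rel32".
  size_t BytesToRestore(const unsigned char* stub) {
    size_t covered = 0;
    while (covered < 5) {
      size_t len = InstructionLength(stub + covered);
      if (len == 0)
        return 0;      // undecodable; the caller must bail out
      covered += len;
    }
    return covered;    // may exceed 5: instructions are never split
  }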
414 void PreamblePatcher::Initialize() {
415 if (!initialized_) {
416 SYSTEM_INFO si = { 0 };
417 ::GetSystemInfo(&si);
418 granularity_ = si.dwAllocationGranularity;
419 pagesize_ = si.dwPageSize;
420 initialized_ = true;
421 }
422 }
423
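Initialize() caches the two system parameters the allocator below leans on; on typical Windows systems they are 64 KiB and 4 KiB respectively. A standalone sketch:

  #include <windows.h>
  #include <cstdio>

  int main() {
    SYSTEM_INFO si = { 0 };
    ::GetSystemInfo(&si);
    // dwAllocationGranularity bounds where VirtualAlloc may place a
    // reservation; dwPageSize is the protection granularity.
    std::printf("granularity=%lu pagesize=%lu\n",
                si.dwAllocationGranularity, si.dwPageSize);
    return 0;
  }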
424 unsigned char* PreamblePatcher::AllocPreambleBlockNear(void* target) {
425 PreamblePage* preamble_page = preamble_pages_;
426 while (preamble_page != NULL) {
427 if (preamble_page->free_ != NULL) {
428 __int64 val = reinterpret_cast<__int64>(preamble_page) -
429 reinterpret_cast<__int64>(target);
430 if ((val > 0 && val + pagesize_ <= INT_MAX) ||
431 (val < 0 && val >= INT_MIN)) {
432 break;
433 }
434 }
435 preamble_page = preamble_page->next_;
436 }
437
438 // The free_ member of the page is used to store the next available block
439 // of memory to use or NULL if there are no chunks available, in which case
440 // we'll allocate a new page.
441 if (preamble_page == NULL || preamble_page->free_ == NULL) {
442 // Create a new preamble page and initialize the free list
443 preamble_page = reinterpret_cast<PreamblePage*>(AllocPageNear(target));
444 SIDESTEP_ASSERT(preamble_page != NULL && "Could not allocate page!");
445 void** pp = &preamble_page->free_;
446 unsigned char* ptr = reinterpret_cast<unsigned char*>(preamble_page) +
447 MAX_PREAMBLE_STUB_SIZE;
448 unsigned char* limit = reinterpret_cast<unsigned char*>(preamble_page) +
449 pagesize_;
450 while (ptr < limit) {
451 *pp = ptr;
452 pp = reinterpret_cast<void**>(ptr);
453 ptr += MAX_PREAMBLE_STUB_SIZE;
454 }
455 *pp = NULL;
456 // Insert the new page into the list
457 preamble_page->magic_ = kPreamblePageMagic;
458 preamble_page->next_ = preamble_pages_;
459 preamble_pages_ = preamble_page;
460 }
461 unsigned char* ret = reinterpret_cast<unsigned char*>(preamble_page->free_);
462 preamble_page->free_ = *(reinterpret_cast<void**>(preamble_page->free_));
463 return ret;
464 }
465
466 void PreamblePatcher::FreePreambleBlock(unsigned char* block) {
467 SIDESTEP_ASSERT(block != NULL);
468 SIDESTEP_ASSERT(granularity_ != 0);
469 uintptr_t ptr = reinterpret_cast<uintptr_t>(block);
470 ptr -= ptr & (granularity_ - 1);
471 PreamblePage* preamble_page = reinterpret_cast<PreamblePage*>(ptr);
472 SIDESTEP_ASSERT(preamble_page->magic_ == kPreamblePageMagic);
473 *(reinterpret_cast<void**>(block)) = preamble_page->free_;
474 preamble_page->free_ = block;
475 }
476
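These two functions form a fixed-size block allocator: each free block stores the address of the next free block in its own first bytes, and FreePreambleBlock recovers the owning page header by masking the block address down to the allocation-granularity boundary the page was allocated on. A portable sketch of the list mechanics, using plain memory instead of VirtualAlloc (kBlockSize stands in for MAX_PREAMBLE_STUB_SIZE):

  #include <cstddef>
  #include <cstdio>

  const size_t kBlockSize = 32;  // stand-in for MAX_PREAMBLE_STUB_SIZE

  struct PageHeader {
    void* free_list;  // head of the chain of free blocks
  };

  // Thread a next-pointer through the first bytes of every free block.
  void InitPage(unsigned char* mem, size_t size, PageHeader* page) {
    void** pp = &page->free_list;
    for (unsigned char* p = mem; p + kBlockSize <= mem + size;
         p += kBlockSize) {
      *pp = p;
      pp = reinterpret_cast<void**>(p);
    }
    *pp = 0;
  }

  void* AllocBlock(PageHeader* page) {
    void* block = page->free_list;
    if (block)  // pop the head; its first bytes hold the next free block
      page->free_list = *reinterpret_cast<void**>(block);
    return block;
  }

  void FreeBlock(PageHeader* page, void* block) {
    *reinterpret_cast<void**>(block) = page->free_list;
    page->free_list = block;
  }

  int main() {
    unsigned char mem[256];
    PageHeader page;
    InitPage(mem, sizeof(mem), &page);
    void* a = AllocBlock(&page);
    void* b = AllocBlock(&page);
    FreeBlock(&page, a);
    std::printf("a=%p b=%p reused=%d\n", a, b, AllocBlock(&page) == a);
    return 0;
  }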
477 void* PreamblePatcher::AllocPageNear(void* target) {
478 MEMORY_BASIC_INFORMATION mbi = { 0 };
479 if (!::VirtualQuery(target, &mbi, sizeof(mbi))) {
480 SIDESTEP_ASSERT(false && "VirtualQuery failed on target address");
481 return 0;
482 }
483 if (initialized_ == false) {
484 PreamblePatcher::Initialize();
485 SIDESTEP_ASSERT(initialized_);
486 }
487 void* pv = NULL;
488 unsigned char* allocation_base = reinterpret_cast<unsigned char*>(
489 mbi.AllocationBase);
490 __int64 i = 1;
491 bool high_target = reinterpret_cast<__int64>(target) > UINT_MAX;
492 while (pv == NULL) {
493 __int64 val = reinterpret_cast<__int64>(allocation_base) -
494 (i * granularity_);
495 if (high_target &&
496 reinterpret_cast<__int64>(target) - val > INT_MAX) {
497 // We're further than 2GB from the target
498 break;
499 } else if (val <= 0) {
500 // At or below zero
501 break;
502 }
503 pv = ::VirtualAlloc(reinterpret_cast<void*>(allocation_base -
504 (i++ * granularity_)),
505 pagesize_, MEM_COMMIT | MEM_RESERVE,
506 PAGE_EXECUTE_READWRITE);
507 }
508
509 // We couldn't allocate low; try to allocate high
510 if (pv == NULL) {
511 i = 1;
512 // Round up to the next multiple of the allocation granularity
513 allocation_base = reinterpret_cast<unsigned char*>(
514 (reinterpret_cast<__int64>(target) &
515 (~(granularity_ - 1))) + granularity_);
516 while (pv == NULL) {
517 __int64 val = reinterpret_cast<__int64>(allocation_base) +
518 (i * granularity_) - reinterpret_cast<__int64>(target);
519 if (val > INT_MAX || val < 0) {
520 // We're too far or we overflowed
521 break;
522 }
523 pv = ::VirtualAlloc(reinterpret_cast<void*>(allocation_base +
524 (i++ * granularity_)),
525 pagesize_, MEM_COMMIT | MEM_RESERVE,
526 PAGE_EXECUTE_READWRITE);
527 }
528 }
529 return pv;
530 }
531
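The two scans above encode one requirement: the chosen page must be close enough to the target that a signed 32-bit displacement can span the distance (the downward scan also budgets pagesize_ so the far end of the page still qualifies). The core check, as a sketch:

  #include <climits>
  #include <cstdint>

  // Sketch only: can a rel32 jump at 'from' reach 'to'?
  bool WithinRel32Range(const void* from, const void* to) {
    int64_t delta = reinterpret_cast<intptr_t>(to) -
                    reinterpret_cast<intptr_t>(from);
    return delta >= INT_MIN && delta <= INT_MAX;
  }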
532 bool PreamblePatcher::IsShortConditionalJump(
533 unsigned char* target,
534 unsigned int instruction_size) {
535 return (*(target) & 0x70) == 0x70 && instruction_size == 2;
536 }
537
538 bool PreamblePatcher::IsNearConditionalJump(
539 unsigned char* target,
540 unsigned int instruction_size) {
541 return *(target) == 0xf && (*(target + 1) & 0x80) == 0x80 &&
542 instruction_size == 6;
543 }
544
545 bool PreamblePatcher::IsNearRelativeJump(
546 unsigned char* target,
547 unsigned int instruction_size) {
548 return *(target) == 0xe9 && instruction_size == 5;
549 }
550
551 bool PreamblePatcher::IsNearAbsoluteCall(
552 unsigned char* target,
553 unsigned int instruction_size) {
554 return *(target) == 0xff && (*(target + 1) & 0x10) == 0x10 &&
555 instruction_size == 6;
556 }
557
558 bool PreamblePatcher::IsNearRelativeCall(
559 unsigned char* target,
560 unsigned int instruction_size) {
561 return *(target) == 0xe8 && instruction_size == 5;
562 }
563
564 bool PreamblePatcher::IsMovWithDisplacement(
565 unsigned char* target,
566 unsigned int instruction_size) {
567 // In this case, the ModRM byte's mod field will be 0 and r/m will be 101b (5)
568 return instruction_size == 7 && *target == 0x48 && *(target + 1) == 0x8b &&
569 (*(target + 2) >> 6) == 0 && (*(target + 2) & 0x7) == 5;
570 }
571
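These predicates are pure opcode pattern matches. Restating two of them against hand-assembled byte sequences shows what they accept (self-contained restatement, not the class methods themselves; the short-jump test here uses the tighter 0xF0 mask, i.e. only opcodes 0x70-0x7F):

  #include <cstdio>

  bool IsShortCondJump(const unsigned char* p, unsigned size) {
    return (p[0] & 0xF0) == 0x70 && size == 2;  // 7x rel8
  }

  bool IsNearCondJump(const unsigned char* p, unsigned size) {
    return p[0] == 0x0F && (p[1] & 0x80) == 0x80 && size == 6;  // 0F 8x rel32
  }

  int main() {
    const unsigned char short_je[] = { 0x74, 0x05 };             // je +5
    const unsigned char near_je[] = { 0x0F, 0x84, 0, 0, 0, 0 };  // je rel32
    std::printf("%d %d\n", IsShortCondJump(short_je, 2),
                IsNearCondJump(near_je, 6));                     // prints 1 1
    return 0;
  }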
572 SideStepError PreamblePatcher::PatchShortConditionalJump(
573 unsigned char* source,
574 unsigned int instruction_size,
575 unsigned char* target,
576 unsigned int* target_bytes,
577 unsigned int target_size) {
578 unsigned char* original_jump_dest = (source + 2) + static_cast<signed char>(source[1]);  // rel8 is signed
579 unsigned char* stub_jump_from = target + 6;
580 __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
581 if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
582 SIDESTEP_ASSERT(false &&
583 "Unable to fix up short jump because target"
584 " is too far away.");
585 return SIDESTEP_JUMP_INSTRUCTION;
586 }
587
588 *target_bytes = 6;
589 if (target_size > *target_bytes) {
590 // Convert the short jump to a near jump.
591 //
592 // 0f 8x xx xx xx xx = Jcc rel32off
593 unsigned short jmpcode = ((0x80 | (source[0] & 0xf)) << 8) | 0x0f;
594 memcpy(reinterpret_cast<void*>(target),
595 reinterpret_cast<void*>(&jmpcode), 2);
596 memcpy(reinterpret_cast<void*>(target + 2),
597 reinterpret_cast<void*>(&fixup_jump_offset), 4);
598 }
599
600 return SIDESTEP_SUCCESS;
601 }
602
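The conversion above in byte terms: a 2-byte "7x rel8" becomes a 6-byte "0F 8x rel32" with the same condition code in the low nibble, and the displacement is rebased from the end of the old instruction to the end of the new one. A sketch:

  #include <cstring>

  // Sketch only: widen "7x rel8" at 'src' into "0F 8x rel32" at 'dst'.
  // Assumes the rebased displacement fits in 32 bits (the real code
  // checks this before writing).
  void WidenShortJcc(const unsigned char* src, unsigned char* dst) {
    const unsigned char* dest =
        src + 2 + static_cast<signed char>(src[1]);  // rel8 is signed
    int rel32 = static_cast<int>(dest - (dst + 6));
    dst[0] = 0x0F;
    dst[1] = 0x80 | (src[0] & 0x0F);  // keep the condition code
    std::memcpy(dst + 2, &rel32, 4);
  }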
603 SideStepError PreamblePatcher::PatchNearJumpOrCall(
604 unsigned char* source,
605 unsigned int instruction_size,
606 unsigned char* target,
607 unsigned int* target_bytes,
608 unsigned int target_size) {
609 SIDESTEP_ASSERT(instruction_size == 5 || instruction_size == 6);
610 unsigned int jmp_offset_in_instruction = instruction_size == 5 ? 1 : 2;
611 unsigned char* original_jump_dest = reinterpret_cast<unsigned char *>(
612 reinterpret_cast<__int64>(source + instruction_size) +
613 *(reinterpret_cast<int*>(source + jmp_offset_in_instruction)));
614 unsigned char* stub_jump_from = target + instruction_size;
615 __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
616 if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
617 SIDESTEP_ASSERT(false &&
618 "Unable to fix up near jump because target"
619 " is too far away.");
620 return SIDESTEP_JUMP_INSTRUCTION;
621 }
622
623 if (fixup_jump_offset < SCHAR_MAX && fixup_jump_offset > SCHAR_MIN) {
624 *target_bytes = 2;
625 if (target_size > *target_bytes) {
626 // If the new offset is in range, use a short jump instead of a near jump.
627 if (source[0] == ASM_JCC32REL_0 &&
628 (source[1] & ASM_JCC32REL_1_MASK) == ASM_JCC32REL_1_MASK) {
629 unsigned short jmpcode = (static_cast<unsigned char>(
630 fixup_jump_offset) << 8) | (0x70 | (source[1] & 0xf));
631 memcpy(reinterpret_cast<void*>(target),
632 reinterpret_cast<void*>(&jmpcode),
633 2);
634 } else {
635 target[0] = ASM_JMP8REL;
636 target[1] = static_cast<unsigned char>(fixup_jump_offset);
637 }
638 }
639 } else {
640 *target_bytes = instruction_size;
641 if (target_size > *target_bytes) {
642 memcpy(reinterpret_cast<void*>(target),
643 reinterpret_cast<void*>(source),
644 jmp_offset_in_instruction);
645 memcpy(reinterpret_cast<void*>(target + jmp_offset_in_instruction),
646 reinterpret_cast<void*>(&fixup_jump_offset),
647 4);
648 }
649 }
650
651 return SIDESTEP_SUCCESS;
652 }
653
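And the reverse direction handled above: when the rebased displacement fits in a signed byte, the 6-byte near Jcc is demoted back to its 2-byte short form. A sketch (the caller guarantees the range check):

  // Sketch only: narrow "0F 8x rel32" at 'src' into "7x rel8" at 'dst',
  // where 'rel8' is the already-rebased displacement, -128..127.
  unsigned NarrowNearJcc(const unsigned char* src, unsigned char* dst,
                         int rel8) {
    dst[0] = 0x70 | (src[1] & 0x0F);          // short form, same condition
    dst[1] = static_cast<unsigned char>(rel8);
    return 2;                                 // bytes emitted
  }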
654 SideStepError PreamblePatcher::PatchMovWithDisplacement(
655 unsigned char* source,
656 unsigned int instruction_size,
657 unsigned char* target,
658 unsigned int* target_bytes,
659 unsigned int target_size) {
660 SIDESTEP_ASSERT(instruction_size == 7);
661 const int mov_offset_in_instruction = 3; // 0x48 0x8b 0x0d <offset>
662 unsigned char* original_mov_dest = reinterpret_cast<unsigned char*>(
663 reinterpret_cast<__int64>(source + instruction_size) +
664 *(reinterpret_cast<int*>(source + mov_offset_in_instruction)));
665 unsigned char* stub_mov_from = target + instruction_size;
666 __int64 fixup_mov_offset = original_mov_dest - stub_mov_from;
667 if (fixup_mov_offset > INT_MAX || fixup_mov_offset < INT_MIN) {
668 SIDESTEP_ASSERT(false &&
669 "Unable to fix up near MOV because target is too far away.");
670 return SIDESTEP_UNEXPECTED;
671 }
672 *target_bytes = instruction_size;
673 if (target_size > *target_bytes) {
674 memcpy(reinterpret_cast<void*>(target),
675 reinterpret_cast<void*>(source),
676 mov_offset_in_instruction);
677 memcpy(reinterpret_cast<void*>(target + mov_offset_in_instruction),
678 reinterpret_cast<void*>(&fixup_mov_offset),
679 4);
680 }
681 return SIDESTEP_SUCCESS;
682 }
683
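The fix-up above keeps the absolute address a RIP-relative "mov r64, [rip+disp32]" (48 8B /r with mod=00, r/m=101) reads while the instruction moves, by recomputing disp32 for the new location. The arithmetic as a sketch:

  #include <climits>
  #include <cstring>

  // Sketch only: copy the 7-byte RIP-relative MOV at 'src' to 'dst',
  // rebasing its disp32 so it still loads from the same absolute address.
  bool RebaseRipRelMov(const unsigned char* src, unsigned char* dst) {
    int old_disp;
    std::memcpy(&old_disp, src + 3, 4);
    const unsigned char* absolute = src + 7 + old_disp;  // address read
    long long new_disp = absolute - (dst + 7);
    if (new_disp > INT_MAX || new_disp < INT_MIN)
      return false;              // new location is too far from the data
    std::memcpy(dst, src, 3);    // REX.W + opcode + ModRM unchanged
    int disp32 = static_cast<int>(new_disp);
    std::memcpy(dst + 3, &disp32, 4);
    return true;
  }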
684 } // namespace sidestep 344 } // namespace sidestep