| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 813 matching lines...) |
| 824 static size_t allocate_alignment = 0; | 824 static size_t allocate_alignment = 0; |
| 825 if (allocate_alignment == 0) { | 825 if (allocate_alignment == 0) { |
| 826 SYSTEM_INFO info; | 826 SYSTEM_INFO info; |
| 827 GetSystemInfo(&info); | 827 GetSystemInfo(&info); |
| 828 allocate_alignment = info.dwAllocationGranularity; | 828 allocate_alignment = info.dwAllocationGranularity; |
| 829 } | 829 } |
| 830 return allocate_alignment; | 830 return allocate_alignment; |
| 831 } | 831 } |
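For reference, a minimal standalone sketch of the same GetSystemInfo query (not part of this patch; the printed values are typical, not guaranteed). The allocation granularity returned here is what VirtualAlloc reservations are rounded to, and it is distinct from the page size.

    #include <windows.h>
    #include <stdio.h>

    int main() {
      SYSTEM_INFO info;
      GetSystemInfo(&info);
      // dwAllocationGranularity (typically 64 KB) is the boundary that
      // VirtualAlloc reservations start on; dwPageSize (typically 4 KB)
      // is the commit/protection granularity.
      printf("page size: %lu\n", info.dwPageSize);
      printf("allocation granularity: %lu\n", info.dwAllocationGranularity);
      return 0;
    }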
| 832 | 832 |
| 833 | 833 |
| 834 static void* GetRandomAddr() { |
| 835 Isolate* isolate = Isolate::UncheckedCurrent(); |
| 836 // Note that the current isolate may not be set up yet when this is
| 837 // reached via CpuFeatures::Probe. Randomization doesn't matter in that
| 838 // case because the code page is freed immediately.
| 839 if (isolate != NULL) { |
| 840 // The address range used to randomize RWX allocations in OS::Allocate.
| 841 // Try not to map pages into the default range where Windows loads DLLs.
| 842 // Use a multiple of 64k to prevent committing unused memory.
| 843 // Note: this does not guarantee that RWX regions will be within the
| 844 // range kAllocationRandomAddressMin to kAllocationRandomAddressMax.
| 845 #ifdef V8_HOST_ARCH_64_BIT |
| 846 static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000; |
| 847 static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000; |
| 848 #else |
| 849 static const intptr_t kAllocationRandomAddressMin = 0x04000000; |
| 850 static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000; |
| 851 #endif |
| 852 uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits) |
| 853 | kAllocationRandomAddressMin; |
| 854 address &= kAllocationRandomAddressMax; |
| 855 return reinterpret_cast<void *>(address); |
| 856 } |
| 857 return NULL; |
| 858 } |
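For reference, a minimal sketch of the hint arithmetic above, using the 32-bit constants and an already-shifted random value (the helper name and the input value are illustrative, not from the patch). OR-ing in the minimum sets a bit that the mask preserves, so the hint never drops below kAllocationRandomAddressMin, and the mask's zero low 16 bits keep the hint 64k-aligned:

    #include <cassert>
    #include <cstdint>

    // 32-bit constants as in the #else branch above.
    static const uintptr_t kMin = 0x04000000;  // bit 26
    static const uintptr_t kMax = 0x3FFF0000;  // bits 16..29

    // 'random_bits' stands in for (V8::RandomPrivate(isolate) << kPageSizeBits).
    static uintptr_t HintFromRandom(uintptr_t random_bits) {
      uintptr_t address = random_bits | kMin;  // bit 26 survives the mask below
      return address & kMax;                   // caps the hint, clears bits 0..15
    }

    int main() {
      uintptr_t hint = HintFromRandom(0x12345678);
      assert(hint >= kMin && hint <= kMax);  // inside the randomization range
      assert((hint & 0xFFFF) == 0);          // 64k aligned, as the comment asks
      return 0;
    }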
| 859 |
| 860 |
| 861 static void* RandomizedVirtualAlloc(size_t size, int action, int protection) { |
| 862 LPVOID base = NULL; |
| 863 |
| 864 if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) { |
| 865 // For executable pages, try to randomize the allocation address.
| 866 for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) { |
| 867 base = VirtualAlloc(GetRandomAddr(), size, action, protection); |
| 868 } |
| 869 } |
| 870 |
| 871 // After three attempts, give up and let the OS find an address to use.
| 872 if (base == NULL) base = VirtualAlloc(NULL, size, action, protection); |
| 873 |
| 874 return base; |
| 875 } |
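The retry loop matters because VirtualAlloc treats a non-NULL address purely as a request: if that range is already in use it fails and returns NULL rather than relocating the allocation. A small sketch of that behavior (the 0x20000000 hint is an arbitrary assumption and may already be occupied on a given run):

    #include <windows.h>
    #include <stdio.h>

    int main() {
      SYSTEM_INFO info;
      GetSystemInfo(&info);
      size_t size = info.dwAllocationGranularity;

      // Reserve at an explicit hint (assumed free for this illustration).
      void* hint = reinterpret_cast<void*>(0x20000000);
      void* first = VirtualAlloc(hint, size, MEM_RESERVE, PAGE_NOACCESS);

      // Reserving the same range again fails with NULL instead of being
      // moved elsewhere, which is why the loop above retries with fresh
      // random hints and only then falls back to a NULL (OS-chosen) address.
      void* second = VirtualAlloc(hint, size, MEM_RESERVE, PAGE_NOACCESS);

      printf("first=%p second=%p\n", first, second);
      if (first != NULL) VirtualFree(first, 0, MEM_RELEASE);
      return 0;
    }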
| 876 |
| 877 |
| 834 void* OS::Allocate(const size_t requested, | 878 void* OS::Allocate(const size_t requested, |
| 835 size_t* allocated, | 879 size_t* allocated, |
| 836 bool is_executable) { | 880 bool is_executable) { |
| 837 // The address range used to randomize RWX allocations in OS::Allocate | |
| 838 // Try not to map pages into the default range that windows loads DLLs | |
| 839 // Use a multiple of 64k to prevent committing unused memory. | |
| 840 // Note: This does not guarantee RWX regions will be within the | |
| 841 // range kAllocationRandomAddressMin to kAllocationRandomAddressMax | |
| 842 #ifdef V8_HOST_ARCH_64_BIT | |
| 843 static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000; | |
| 844 static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000; | |
| 845 #else | |
| 846 static const intptr_t kAllocationRandomAddressMin = 0x04000000; | |
| 847 static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000; | |
| 848 #endif | |
| 849 | |
| 850 // VirtualAlloc rounds allocated size to page size automatically. | 881 // VirtualAlloc rounds allocated size to page size automatically. |
| 851 size_t msize = RoundUp(requested, static_cast<int>(GetPageSize())); | 882 size_t msize = RoundUp(requested, static_cast<int>(GetPageSize())); |
| 852 intptr_t address = 0; | |
| 853 | 883 |
| 854 // Windows XP SP2 allows Data Execution Prevention (DEP). | 884 // Windows XP SP2 allows Data Execution Prevention (DEP). |
| 855 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; | 885 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; |
| 856 | 886 |
| 857 // For exectutable pages try and randomize the allocation address | 887 LPVOID mbase = RandomizedVirtualAlloc(msize, |
| 858 if (prot == PAGE_EXECUTE_READWRITE && | 888 MEM_COMMIT | MEM_RESERVE, |
| 859 msize >= static_cast<size_t>(Page::kPageSize)) { | 889 prot); |
| 860 address = (V8::RandomPrivate(Isolate::Current()) << kPageSizeBits) | |
| 861 | kAllocationRandomAddressMin; | |
| 862 address &= kAllocationRandomAddressMax; | |
| 863 } | |
| 864 | |
| 865 LPVOID mbase = VirtualAlloc(reinterpret_cast<void *>(address), | |
| 866 msize, | |
| 867 MEM_COMMIT | MEM_RESERVE, | |
| 868 prot); | |
| 869 if (mbase == NULL && address != 0) | |
| 870 mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot); | |
| 871 | 890 |
| 872 if (mbase == NULL) { | 891 if (mbase == NULL) { |
| 873 LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed")); | 892 LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed")); |
| 874 return NULL; | 893 return NULL; |
| 875 } | 894 } |
| 876 | 895 |
| 877 ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment())); | 896 ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment())); |
| 878 | 897 |
| 879 *allocated = msize; | 898 *allocated = msize; |
| 880 UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize)); | 899 UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize)); |
| (...skipping 583 matching lines...) |
| 1464 } | 1483 } |
| 1465 | 1484 |
| 1466 | 1485 |
| 1467 bool VirtualMemory::Uncommit(void* address, size_t size) { | 1486 bool VirtualMemory::Uncommit(void* address, size_t size) { |
| 1468 ASSERT(IsReserved()); | 1487 ASSERT(IsReserved()); |
| 1469 return UncommitRegion(address, size); | 1488 return UncommitRegion(address, size); |
| 1470 } | 1489 } |
| 1471 | 1490 |
| 1472 | 1491 |
| 1473 void* VirtualMemory::ReserveRegion(size_t size) { | 1492 void* VirtualMemory::ReserveRegion(size_t size) { |
| 1474 return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS); | 1493 return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS); |
| 1475 } | 1494 } |
| 1476 | 1495 |
| 1477 | 1496 |
| 1478 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { | 1497 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { |
| 1479 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; | 1498 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; |
| 1480 if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { | 1499 if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { |
| 1481 return false; | 1500 return false; |
| 1482 } | 1501 } |
| 1483 | 1502 |
| 1484 UpdateAllocatedSpaceLimits(base, static_cast<int>(size)); | 1503 UpdateAllocatedSpaceLimits(base, static_cast<int>(size)); |
| (...skipping 15 matching lines...) |
| 1500 bool VirtualMemory::UncommitRegion(void* base, size_t size) { | 1519 bool VirtualMemory::UncommitRegion(void* base, size_t size) { |
| 1501 return VirtualFree(base, size, MEM_DECOMMIT) != 0; | 1520 return VirtualFree(base, size, MEM_DECOMMIT) != 0; |
| 1502 } | 1521 } |
| 1503 | 1522 |
| 1504 | 1523 |
| 1505 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { | 1524 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { |
| 1506 return VirtualFree(base, 0, MEM_RELEASE) != 0; | 1525 return VirtualFree(base, 0, MEM_RELEASE) != 0; |
| 1507 } | 1526 } |
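Taken together, the region helpers map onto the usual Win32 reserve/commit/decommit/release lifecycle. A minimal standalone sketch of that sequence with raw VirtualAlloc/VirtualFree calls (sizes are illustrative; real callers round to the page size and allocation granularity):

    #include <windows.h>
    #include <cassert>

    int main() {
      const size_t kRegion = 1 << 20;  // 1 MB reservation (illustrative)
      const size_t kChunk = 4096;      // one typical page (illustrative)

      // Reserve address space only; no physical pages are committed yet.
      void* base = VirtualAlloc(NULL, kRegion, MEM_RESERVE, PAGE_NOACCESS);
      assert(base != NULL);

      // Commit a readable/writable chunk at the start of the reservation.
      void* chunk = VirtualAlloc(base, kChunk, MEM_COMMIT, PAGE_READWRITE);
      assert(chunk != NULL);

      // Return the physical pages but keep the address range reserved.
      VirtualFree(base, kChunk, MEM_DECOMMIT);

      // Release the whole reservation; MEM_RELEASE requires a size of 0.
      VirtualFree(base, 0, MEM_RELEASE);
      return 0;
    }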
| 1508 | 1527 |
| 1509 | 1528 |
| 1510 | |
| 1511 // ---------------------------------------------------------------------------- | 1529 // ---------------------------------------------------------------------------- |
| 1512 // Win32 thread support. | 1530 // Win32 thread support. |
| 1513 | 1531 |
| 1514 // Definition of invalid thread handle and id. | 1532 // Definition of invalid thread handle and id. |
| 1515 static const HANDLE kNoThread = INVALID_HANDLE_VALUE; | 1533 static const HANDLE kNoThread = INVALID_HANDLE_VALUE; |
| 1516 | 1534 |
| 1517 // Entry point for threads. The supplied argument is a pointer to the thread | 1535 // Entry point for threads. The supplied argument is a pointer to the thread |
| 1518 // object. The entry function dispatches to the run method in the thread | 1536 // object. The entry function dispatches to the run method in the thread |
| 1519 // object. It is important that this function has __stdcall calling | 1537 // object. It is important that this function has __stdcall calling |
| 1520 // convention. | 1538 // convention. |
| (...skipping 524 matching lines...) |
| 2045 | 2063 |
| 2046 | 2064 |
| 2047 void Sampler::Stop() { | 2065 void Sampler::Stop() { |
| 2048 ASSERT(IsActive()); | 2066 ASSERT(IsActive()); |
| 2049 SamplerThread::RemoveActiveSampler(this); | 2067 SamplerThread::RemoveActiveSampler(this); |
| 2050 SetActive(false); | 2068 SetActive(false); |
| 2051 } | 2069 } |
| 2052 | 2070 |
| 2053 | 2071 |
| 2054 } } // namespace v8::internal | 2072 } } // namespace v8::internal |