OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 818 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
829 } | 829 } |
830 return allocate_alignment; | 830 return allocate_alignment; |
831 } | 831 } |
832 | 832 |
833 | 833 |
834 intptr_t OS::CommitPageSize() { | 834 intptr_t OS::CommitPageSize() { |
835 return 4096; | 835 return 4096; |
836 } | 836 } |
837 | 837 |
838 | 838 |
| 839 static void* GetRandomAddr() { |
| 840 Isolate* isolate = Isolate::UncheckedCurrent(); |
| 841 // Note that the current isolate isn't set up in a call path via |
| 842 // CpuFeatures::Probe. We don't care about randomization in this case because |
| 843 // the code page is immediately freed. |
| 844 if (isolate != NULL) { |
| 845 // The address range used to randomize RWX allocations in OS::Allocate |
| 846 // Try not to map pages into the default range that windows loads DLLs |
| 847 // Use a multiple of 64k to prevent committing unused memory. |
| 848 // Note: This does not guarantee RWX regions will be within the |
| 849 // range kAllocationRandomAddressMin to kAllocationRandomAddressMax |
| 850 #ifdef V8_HOST_ARCH_64_BIT |
| 851 static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000; |
| 852 static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000; |
| 853 #else |
| 854 static const intptr_t kAllocationRandomAddressMin = 0x04000000; |
| 855 static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000; |
| 856 #endif |
| 857 uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits) |
| 858 | kAllocationRandomAddressMin; |
| 859 address &= kAllocationRandomAddressMax; |
| 860 return reinterpret_cast<void *>(address); |
| 861 } |
| 862 return NULL; |
| 863 } |
| 864 |
| 865 |
| 866 static void* RandomizedVirtualAlloc(size_t size, int action, int protection) { |
| 867 LPVOID base = NULL; |
| 868 |
| 869 if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) { |
| 870 // For executable pages try to randomize the allocation address |
| 871 for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) { |
| 872 base = VirtualAlloc(GetRandomAddr(), size, action, protection); |
| 873 } |
| 874 } |
| 875 |
| 876 // After three attempts give up and let the OS find an address to use. |
| 877 if (base == NULL) base = VirtualAlloc(NULL, size, action, protection); |
| 878 |
| 879 return base; |
| 880 } |
| 881 |
| 882 |
839 void* OS::Allocate(const size_t requested, | 883 void* OS::Allocate(const size_t requested, |
840 size_t* allocated, | 884 size_t* allocated, |
841 bool is_executable) { | 885 bool is_executable) { |
842 // The address range used to randomize RWX allocations in OS::Allocate | |
843 // Try not to map pages into the default range that windows loads DLLs | |
844 // Use a multiple of 64k to prevent committing unused memory. | |
845 // Note: This does not guarantee RWX regions will be within the | |
846 // range kAllocationRandomAddressMin to kAllocationRandomAddressMax | |
847 #ifdef V8_HOST_ARCH_64_BIT | |
848 static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000; | |
849 static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000; | |
850 #else | |
851 static const intptr_t kAllocationRandomAddressMin = 0x04000000; | |
852 static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000; | |
853 #endif | |
854 | |
855 // VirtualAlloc rounds allocated size to page size automatically. | 886 // VirtualAlloc rounds allocated size to page size automatically. |
856 size_t msize = RoundUp(requested, static_cast<int>(GetPageSize())); | 887 size_t msize = RoundUp(requested, static_cast<int>(GetPageSize())); |
857 intptr_t address = 0; | |
858 | 888 |
859 // Windows XP SP2 allows Data Execution Prevention (DEP). | 889 // Windows XP SP2 allows Data Execution Prevention (DEP). |
860 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; | 890 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; |
861 | 891 |
862 // For exectutable pages try and randomize the allocation address | 892 LPVOID mbase = RandomizedVirtualAlloc(msize, |
863 if (prot == PAGE_EXECUTE_READWRITE && | 893 MEM_COMMIT | MEM_RESERVE, |
864 msize >= static_cast<size_t>(Page::kPageSize)) { | 894 prot); |
865 address = (V8::RandomPrivate(Isolate::Current()) << kPageSizeBits) | |
866 | kAllocationRandomAddressMin; | |
867 address &= kAllocationRandomAddressMax; | |
868 } | |
869 | |
870 LPVOID mbase = VirtualAlloc(reinterpret_cast<void *>(address), | |
871 msize, | |
872 MEM_COMMIT | MEM_RESERVE, | |
873 prot); | |
874 if (mbase == NULL && address != 0) | |
875 mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot); | |
876 | 895 |
877 if (mbase == NULL) { | 896 if (mbase == NULL) { |
878 LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed")); | 897 LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed")); |
879 return NULL; | 898 return NULL; |
880 } | 899 } |
881 | 900 |
882 ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment())); | 901 ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment())); |
883 | 902 |
884 *allocated = msize; | 903 *allocated = msize; |
885 UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize)); | 904 UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize)); |
(...skipping 578 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1464 } | 1483 } |
1465 | 1484 |
1466 | 1485 |
1467 bool VirtualMemory::Uncommit(void* address, size_t size) { | 1486 bool VirtualMemory::Uncommit(void* address, size_t size) { |
1468 ASSERT(IsReserved()); | 1487 ASSERT(IsReserved()); |
1469 return UncommitRegion(address, size); | 1488 return UncommitRegion(address, size); |
1470 } | 1489 } |
1471 | 1490 |
1472 | 1491 |
1473 void* VirtualMemory::ReserveRegion(size_t size) { | 1492 void* VirtualMemory::ReserveRegion(size_t size) { |
1474 return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS); | 1493 return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS); |
1475 } | 1494 } |
1476 | 1495 |
1477 | 1496 |
1478 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { | 1497 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { |
1479 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; | 1498 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; |
1480 if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { | 1499 if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { |
1481 return false; | 1500 return false; |
1482 } | 1501 } |
1483 | 1502 |
1484 UpdateAllocatedSpaceLimits(base, static_cast<int>(size)); | 1503 UpdateAllocatedSpaceLimits(base, static_cast<int>(size)); |
(...skipping 15 matching lines...) Expand all Loading... |
1500 bool VirtualMemory::UncommitRegion(void* base, size_t size) { | 1519 bool VirtualMemory::UncommitRegion(void* base, size_t size) { |
1501 return VirtualFree(base, size, MEM_DECOMMIT) != 0; | 1520 return VirtualFree(base, size, MEM_DECOMMIT) != 0; |
1502 } | 1521 } |
1503 | 1522 |
1504 | 1523 |
1505 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { | 1524 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { |
1506 return VirtualFree(base, 0, MEM_RELEASE) != 0; | 1525 return VirtualFree(base, 0, MEM_RELEASE) != 0; |
1507 } | 1526 } |
1508 | 1527 |
1509 | 1528 |
1510 | |
1511 // ---------------------------------------------------------------------------- | 1529 // ---------------------------------------------------------------------------- |
1512 // Win32 thread support. | 1530 // Win32 thread support. |
1513 | 1531 |
1514 // Definition of invalid thread handle and id. | 1532 // Definition of invalid thread handle and id. |
1515 static const HANDLE kNoThread = INVALID_HANDLE_VALUE; | 1533 static const HANDLE kNoThread = INVALID_HANDLE_VALUE; |
1516 | 1534 |
1517 // Entry point for threads. The supplied argument is a pointer to the thread | 1535 // Entry point for threads. The supplied argument is a pointer to the thread |
1518 // object. The entry function dispatches to the run method in the thread | 1536 // object. The entry function dispatches to the run method in the thread |
1519 // object. It is important that this function has __stdcall calling | 1537 // object. It is important that this function has __stdcall calling |
1520 // convention. | 1538 // convention. |
(...skipping 528 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2049 | 2067 |
2050 | 2068 |
2051 void Sampler::Stop() { | 2069 void Sampler::Stop() { |
2052 ASSERT(IsActive()); | 2070 ASSERT(IsActive()); |
2053 SamplerThread::RemoveActiveSampler(this); | 2071 SamplerThread::RemoveActiveSampler(this); |
2054 SetActive(false); | 2072 SetActive(false); |
2055 } | 2073 } |
2056 | 2074 |
2057 | 2075 |
2058 } } // namespace v8::internal | 2076 } } // namespace v8::internal |
OLD | NEW |