| OLD | NEW | 
|---|---|
| 1 // Copyright (c) 2011, Google Inc. | 1 // Copyright (c) 2011, Google Inc. | 
| 2 // All rights reserved. | 2 // All rights reserved. | 
| 3 // | 3 // | 
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without | 
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are | 
| 6 // met: | 6 // met: | 
| 7 // | 7 // | 
| 8 //     * Redistributions of source code must retain the above copyright | 8 //     * Redistributions of source code must retain the above copyright | 
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. | 
| 10 //     * Redistributions in binary form must reproduce the above | 10 //     * Redistributions in binary form must reproduce the above | 
| (...skipping 64 matching lines...) |
| 75 namespace { | 75 namespace { | 
| 76 void EnsureNonLoop(void* node, void* next) { | 76 void EnsureNonLoop(void* node, void* next) { | 
| 77   // We only have time to do minimal checking.  We don't traverse the list, but | 77   // We only have time to do minimal checking.  We don't traverse the list, but | 
| 78   // only look for an immediate loop (cycle back to ourself). | 78   // only look for an immediate loop (cycle back to ourself). | 
| 79   if (node != next) return; | 79   if (node != next) return; | 
| 80   Log(kCrash, __FILE__, __LINE__, "Circular loop in list detected: ", next); | 80   Log(kCrash, __FILE__, __LINE__, "Circular loop in list detected: ", next); | 
| 81 } | 81 } | 
| 82 | 82 | 
| 83 inline void* MaskPtr(void* p) { | 83 inline void* MaskPtr(void* p) { | 
| 84   // Maximize ASLR entropy and guarantee the result is an invalid address. | 84   // Maximize ASLR entropy and guarantee the result is an invalid address. | 
| 85   const uintptr_t q = ~(reinterpret_cast<intptr_t>(TCMalloc_SystemAlloc) >> 13); | 85   const uintptr_t mask = ~(reinterpret_cast<uintptr_t>(TCMalloc_SystemAlloc) | 
|  | 86                            >> 13) \| 1; |
| 86   // Do not mask NULL pointers, otherwise we could leak address state. | 87   // Do not mask NULL pointers, otherwise we could leak address state. | 
| 87   if (p) | 88   if (p) | 
| 88     return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) ^ q); | 89     return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) ^ mask); | 
| 89   return p; | 90   return p; | 
| 90 } | 91 } | 
| 91 | 92 | 
| 92 inline void* UnmaskPtr(void* p) { | 93 inline void* UnmaskPtr(void* p) { | 
| 93   return MaskPtr(p); | 94   return MaskPtr(p); | 
| 94 } | 95 } | 
| 95 | 96 | 
| 96 // Returns value of the \|previous\| pointer w/out running a sanity | 97 // Returns value of the \|previous\| pointer w/out running a sanity |
| 97 // check. | 98 // check. | 
| 98 inline void *FL_Previous_No_Check(void *t) { | 99 inline void *FL_Previous_No_Check(void *t) { | 
| (...skipping 138 matching lines...) |
| 237 | 238 | 
| 238 namespace { | 239 namespace { | 
| 239 | 240 | 
| 240 inline void FL_SetNext(void *t, void *n) { | 241 inline void FL_SetNext(void *t, void *n) { | 
| 241   tcmalloc::SLL_SetNext(t,n); | 242   tcmalloc::SLL_SetNext(t,n); | 
| 242 } | 243 } | 
| 243 | 244 | 
| 244 } | 245 } | 
| 245 | 246 | 
| 246 #endif // TCMALLOC_USE_DOUBLYLINKED_FREELIST | 247 #endif // TCMALLOC_USE_DOUBLYLINKED_FREELIST | 
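
Two properties make the revised `MaskPtr` safe and cheap, and the diff's change to the mask expression secures both: casting through `uintptr_t` instead of `intptr_t` avoids the implementation-defined right shift of a signed value, and OR-ing in 1 guarantees the mask is odd, so every masked pointer is misaligned and can never be a valid heap address. XOR with a fixed mask is also its own inverse, which is why `UnmaskPtr` simply calls `MaskPtr` again. Below is a minimal standalone sketch of the idea, not the allocator's actual code; `kMaskSeed` is a hypothetical stand-in for the ASLR-randomized address of `TCMalloc_SystemAlloc`, which isn't available outside the allocator:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the address of TCMalloc_SystemAlloc; in the
// real allocator the mask draws its entropy from that function's
// ASLR-randomized load address.
static const uintptr_t kMaskSeed = 0x00405a30u;

inline void* MaskPtr(void* p) {
  // Shifting right by 13 discards the low, least-random bits of the code
  // address; OR-ing in 1 sets the mask's low bit, so a masked pointer is
  // always odd and therefore never a valid (aligned) heap address.
  const uintptr_t mask = ~(kMaskSeed >> 13) | 1;
  if (p)  // Leave NULL alone so masked lists still terminate at NULL.
    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) ^ mask);
  return p;
}

int main() {
  int x = 42;
  void* p = &x;
  void* masked = MaskPtr(p);
  assert(masked != p);
  assert(reinterpret_cast<uintptr_t>(masked) & 1);  // Misaligned, hence invalid.
  // XOR-ing with the same mask twice is the identity, so unmasking is just
  // masking again -- exactly why UnmaskPtr() forwards to MaskPtr().
  assert(MaskPtr(masked) == p);
  std::printf("round trip OK: %p -> %p -> %p\n", p, masked, MaskPtr(masked));
  return 0;
}
```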