| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 297 matching lines...) |
| 308 // Maximum size of the virtual memory. 0 means there is no artificial | 308 // Maximum size of the virtual memory. 0 means there is no artificial |
| 309 // limit. | 309 // limit. |
| 310 static intptr_t MaxVirtualMemory(); | 310 static intptr_t MaxVirtualMemory(); |
| 311 | 311 |
| 312 // Returns the double constant NAN | 312 // Returns the double constant NAN |
| 313 static double nan_value(); | 313 static double nan_value(); |
| 314 | 314 |
| 315 // Support runtime detection of Cpu implementer | 315 // Support runtime detection of Cpu implementer |
| 316 static CpuImplementer GetCpuImplementer(); | 316 static CpuImplementer GetCpuImplementer(); |
| 317 | 317 |
| 318 // Support runtime detection of Cpu part |
| 319 static CpuPart GetCpuPart(CpuImplementer implementer); |
| 320 |
| 318 // Support runtime detection of VFP3 on ARM CPUs. | 321 // Support runtime detection of VFP3 on ARM CPUs. |
| 319 static bool ArmCpuHasFeature(CpuFeature feature); | 322 static bool ArmCpuHasFeature(CpuFeature feature); |
| 320 | 323 |
| 321 // Support runtime detection of whether the hard float option of the | 324 // Support runtime detection of whether the hard float option of the |
| 322 // EABI is used. | 325 // EABI is used. |
| 323 static bool ArmUsingHardFloat(); | 326 static bool ArmUsingHardFloat(); |
| 324 | 327 |
| 325 // Support runtime detection of FPU on MIPS CPUs. | 328 // Support runtime detection of FPU on MIPS CPUs. |
| 326 static bool MipsCpuHasFeature(CpuFeature feature); | 329 static bool MipsCpuHasFeature(CpuFeature feature); |
| 327 | 330 |
| 328 // Returns the activation frame alignment constraint or zero if | 331 // Returns the activation frame alignment constraint or zero if |
| 329 // the platform doesn't care. Guaranteed to be a power of two. | 332 // the platform doesn't care. Guaranteed to be a power of two. |
| 330 static int ActivationFrameAlignment(); | 333 static int ActivationFrameAlignment(); |
| 331 | 334 |
| 332 #if defined(V8_TARGET_ARCH_IA32) | 335 #if defined(V8_TARGET_ARCH_IA32) |
| 333 // Limit below which the extra overhead of the MemCopy function is likely | 336 // Limit below which the extra overhead of the MemCopy function is likely |
| 334 // to outweigh the benefits of faster copying. | 337 // to outweigh the benefits of faster copying. |
| 335 static const int kMinComplexMemCopy = 64; | 338 static const int kMinComplexMemCopy = 64; |
| 336 | 339 |
| 337 // Copy memory area. No restrictions. | 340 // Copy memory area. No restrictions. |
| 338 static void MemMove(void* dest, const void* src, size_t size); | 341 static void MemMove(void* dest, const void* src, size_t size); |
| 339 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); | 342 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); |
| 340 | 343 |
| 341 // Keep the distinction of "move" vs. "copy" for the benefit of other | 344 // Keep the distinction of "move" vs. "copy" for the benefit of other |
| 342 // architectures. | 345 // architectures. |
| 343 static void MemCopy(void* dest, const void* src, size_t size) { | 346 static void MemCopy(void* dest, const void* src, size_t size) { |
| 344 MemMove(dest, src, size); | 347 MemMove(dest, src, size); |
| 345 } | 348 } |
| 346 #else // V8_TARGET_ARCH_IA32 | 349 #elif defined(V8_HOST_ARCH_ARM) |
| 350 typedef void (*MemCopyUint8Function)(uint8_t* dest, |
| 351 const uint8_t* src, |
| 352 size_t size); |
| 353 static MemCopyUint8Function memcopy_uint8_function; |
| 354 static void MemCopyUint8Wrapper(uint8_t* dest, |
| 355 const uint8_t* src, |
| 356 size_t chars) { |
| 357 memcpy(dest, src, chars); |
| 358 } |
| 359 // For values < 16, the assembler function is slower than the inlined C code. |
| 360 static const int kMinComplexMemCopy = 16; |
| 361 static void MemCopy(void* dest, const void* src, size_t size) { |
| 362 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), |
| 363 reinterpret_cast<const uint8_t*>(src), |
| 364 size); |
| 365 } |
| 366 static void MemMove(void* dest, const void* src, size_t size) { |
| 367 memmove(dest, src, size); |
| 368 } |
| 369 |
| 370 typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, |
| 371 const uint8_t* src, |
| 372 size_t size); |
| 373 static MemCopyUint16Uint8Function memcopy_uint16_uint8_function; |
| 374 static void MemCopyUint16Uint8Wrapper(uint16_t* dest, |
| 375 const uint8_t* src, |
| 376 size_t chars); |
| 377 // For values < 12, the assembler function is slower than the inlined C code. |
| 378 static const int kMinComplexConvertMemCopy = 12; |
| 379 static void MemCopyUint16Uint8(uint16_t* dest, |
| 380 const uint8_t* src, |
| 381 size_t size) { |
| 382 (*memcopy_uint16_uint8_function)(dest, src, size); |
| 383 } |
| 384 #else |
| 347 // Copy memory area to disjoint memory area. | 385 // Copy memory area to disjoint memory area. |
| 348 static void MemCopy(void* dest, const void* src, size_t size) { | 386 static void MemCopy(void* dest, const void* src, size_t size) { |
| 349 memcpy(dest, src, size); | 387 memcpy(dest, src, size); |
| 350 } | 388 } |
| 351 static void MemMove(void* dest, const void* src, size_t size) { | 389 static void MemMove(void* dest, const void* src, size_t size) { |
| 352 memmove(dest, src, size); | 390 memmove(dest, src, size); |
| 353 } | 391 } |
| 354 static const int kMinComplexMemCopy = 16 * kPointerSize; | 392 static const int kMinComplexMemCopy = 16 * kPointerSize; |
| 355 #endif // V8_TARGET_ARCH_IA32 | 393 #endif // V8_TARGET_ARCH_IA32 |
| 356 | 394 |
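Note on the new GetCpuPart() query (new line 319): it pairs with the existing GetCpuImplementer(), since an ARM part number is only meaningful relative to its implementer, which is presumably why the implementer is passed in. Below is a minimal sketch of how a caller might combine the two, assuming these are static members of the OS class declared in this header; the enum values ARM_IMPLEMENTER and CORTEX_A9 are assumed for illustration and are not taken from the patch.

```cpp
// Sketch only: pick a tuned code path based on the detected core.
// Assumes platform.h is included; ARM_IMPLEMENTER and CORTEX_A9 are
// assumed CpuImplementer/CpuPart enumerators used for illustration.
void InstallTunedMemCopy() {
  CpuImplementer implementer = OS::GetCpuImplementer();
  CpuPart part = OS::GetCpuPart(implementer);  // only meaningful for this implementer
  if (implementer == ARM_IMPLEMENTER && part == CORTEX_A9) {
    // Point memcopy_uint8_function at a stub tuned for this core.
  }
}
```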
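Note on the new V8_HOST_ARCH_ARM branch (new lines 349-383): MemCopy is routed through the memcopy_uint8_function pointer, which defaults to the plain memcpy wrapper and can later be repointed at an optimized routine; per the comments, the assembler version only wins for sizes of 16 and up (12 for the uint16/uint8 variant). A self-contained sketch of that dispatch pattern, using local names that only mirror the shape of the patch:

```cpp
#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Function-pointer dispatch: default to memcpy, allow a faster routine
// to be installed later (e.g. once code generation is available).
typedef void (*CopyUint8Fn)(uint8_t* dest, const uint8_t* src, size_t size);

static void CopyUint8Wrapper(uint8_t* dest, const uint8_t* src, size_t size) {
  memcpy(dest, src, size);  // fallback until an optimized stub is installed
}

static CopyUint8Fn copy_uint8_fn = &CopyUint8Wrapper;

static void Copy(void* dest, const void* src, size_t size) {
  (*copy_uint8_fn)(reinterpret_cast<uint8_t*>(dest),
                   reinterpret_cast<const uint8_t*>(src),
                   size);
}

int main() {
  char src[32] = "hello";
  char dst[32];
  Copy(dst, src, sizeof(src));  // goes through the default wrapper
  return 0;
}
```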
| (...skipping 369 matching lines...) |
| 726 static uint16_t HToN(uint16_t value); | 764 static uint16_t HToN(uint16_t value); |
| 727 static uint16_t NToH(uint16_t value); | 765 static uint16_t NToH(uint16_t value); |
| 728 static uint32_t HToN(uint32_t value); | 766 static uint32_t HToN(uint32_t value); |
| 729 static uint32_t NToH(uint32_t value); | 767 static uint32_t NToH(uint32_t value); |
| 730 }; | 768 }; |
| 731 | 769 |
| 732 | 770 |
| 733 } } // namespace v8::internal | 771 } } // namespace v8::internal |
| 734 | 772 |
| 735 #endif // V8_PLATFORM_H_ | 773 #endif // V8_PLATFORM_H_ |
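Note on the HToN/NToH declarations above (new lines 764-767): by the usual naming convention these are host-to-network and network-to-host byte-order conversions; network order is big-endian, so on a little-endian host the conversion is a byte swap and on a big-endian host it is the identity. A standalone sketch of the 32-bit case, not taken from V8's implementation:

```cpp
#include <stdint.h>

// Illustration only: byte-swap for a little-endian host.
// NToH is the same operation, since the swap is its own inverse.
static uint32_t HostToNetwork32(uint32_t value) {
  return ((value & 0x000000FFu) << 24) |
         ((value & 0x0000FF00u) << 8)  |
         ((value & 0x00FF0000u) >> 8)  |
         ((value & 0xFF000000u) >> 24);
}
// Example: HostToNetwork32(0x12345678) == 0x78563412 on a little-endian host.
```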