OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 54 matching lines...)
65 // Number of times a function has to be seen on the stack before it is | 65 // Number of times a function has to be seen on the stack before it is |
66 // optimized. | 66 // optimized. |
67 static const int kProfilerTicksBeforeOptimization = 2; | 67 static const int kProfilerTicksBeforeOptimization = 2; |
68 | 68 |
69 // Maximum size in bytes of generated code for a function to be optimized | 69 // Maximum size in bytes of generated code for a function to be optimized |
70 // the very first time it is seen on the stack. | 70 // the very first time it is seen on the stack. |
71 static const int kMaxSizeEarlyOpt = 500; | 71 static const int kMaxSizeEarlyOpt = 500; |
72 | 72 |
73 | 73 |
74 Atomic32 RuntimeProfiler::state_ = 0; | 74 Atomic32 RuntimeProfiler::state_ = 0; |
75 // TODO(isolates): Create the semaphore lazily and clean it up when no | 75 |
76 // longer required. | 76 // TODO(isolates): Clean up the semaphore when it is no longer required. |
77 Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0); | 77 static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER; |
78 | 78 |
79 #ifdef DEBUG | 79 #ifdef DEBUG |
80 bool RuntimeProfiler::has_been_globally_set_up_ = false; | 80 bool RuntimeProfiler::has_been_globally_set_up_ = false; |
81 #endif | 81 #endif |
82 bool RuntimeProfiler::enabled_ = false; | 82 bool RuntimeProfiler::enabled_ = false; |
83 | 83 |
84 | 84 |
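With the change above, nothing semaphore-related runs at static-initialization time: LAZY_SEMAPHORE_INITIALIZER is a plain constant initializer, and the underlying object is only constructed on the first Pointer() call. A minimal sketch of that pattern, assuming C++20 for std::counting_semaphore; LazySemaphoreSketch and its internals are illustrative only, not V8's actual LazySemaphore implementation:

#include <mutex>      // std::once_flag, std::call_once
#include <new>        // placement new
#include <semaphore>  // std::counting_semaphore (C++20)

template <int kInitialCount>
struct LazySemaphoreSketch {
  using Sem = std::counting_semaphore<>;

  // Both members are constant-initialized, so no constructor code runs
  // for this object before main() -- the point of the change above.
  std::once_flag once;
  alignas(Sem) unsigned char storage[sizeof(Sem)];

  Sem* Pointer() {
    // The first caller constructs the semaphore; later calls just
    // return the existing object, thread-safely.
    std::call_once(once, [this] { new (storage) Sem(kInitialCount); });
    return reinterpret_cast<Sem*>(storage);
  }
};

static LazySemaphoreSketch<0> semaphore;

int main() {
  semaphore.Pointer()->release();  // Signal()
  semaphore.Pointer()->acquire();  // Wait() -- returns at once here
}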
85 RuntimeProfiler::RuntimeProfiler(Isolate* isolate) | 85 RuntimeProfiler::RuntimeProfiler(Isolate* isolate) |
86 : isolate_(isolate), | 86 : isolate_(isolate), |
87 sampler_threshold_(kSamplerThresholdInit), | 87 sampler_threshold_(kSamplerThresholdInit), |
(...skipping 311 matching lines...)
399 } | 399 } |
400 | 400 |
401 | 401 |
402 void RuntimeProfiler::HandleWakeUp(Isolate* isolate) { | 402 void RuntimeProfiler::HandleWakeUp(Isolate* isolate) { |
403 // The profiler thread must still be waiting. | 403 // The profiler thread must still be waiting. |
404 ASSERT(NoBarrier_Load(&state_) >= 0); | 404 ASSERT(NoBarrier_Load(&state_) >= 0); |
405 // In IsolateEnteredJS we have already incremented the counter and | 405 // In IsolateEnteredJS we have already incremented the counter and |
406 // undone the decrement done by the profiler thread. Increment again | 406 // undone the decrement done by the profiler thread. Increment again |
407 // to get the right count of active isolates. | 407 // to get the right count of active isolates. |
408 NoBarrier_AtomicIncrement(&state_, 1); | 408 NoBarrier_AtomicIncrement(&state_, 1); |
409 semaphore_->Signal(); | 409 semaphore.Pointer()->Signal(); |
410 } | 410 } |
411 | 411 |
412 | 412 |
413 bool RuntimeProfiler::IsSomeIsolateInJS() { | 413 bool RuntimeProfiler::IsSomeIsolateInJS() { |
414 return NoBarrier_Load(&state_) > 0; | 414 return NoBarrier_Load(&state_) > 0; |
415 } | 415 } |
416 | 416 |
417 | 417 |
418 bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() { | 418 bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() { |
419 Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1); | 419 Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1); |
420 ASSERT(old_state >= -1); | 420 ASSERT(old_state >= -1); |
421 if (old_state != 0) return false; | 421 if (old_state != 0) return false; |
422 semaphore_->Wait(); | 422 semaphore.Pointer()->Wait(); |
423 return true; | 423 return true; |
424 } | 424 } |
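For context, HandleWakeUp(), IsSomeIsolateInJS(), and WaitForSomeIsolateToEnterJS() together form a small parking protocol: state_ counts the isolates currently executing JS, and -1 marks the profiler thread as parked on the semaphore. A self-contained sketch of that protocol, reconstructed from the comments in this file using std::atomic and a C++20 semaphore; IsolateEnteredJS() below is a simplified stand-in for V8's hook of the same name, not its actual code:

#include <atomic>
#include <cassert>
#include <semaphore>  // C++20

static std::atomic<int> state{0};         // isolates in JS, or -1
static std::counting_semaphore<> sem{0};  // the profiler parks here

// Profiler thread: park only when no isolate is in JS. The CAS from
// 0 to -1 is the "decrement done by the profiler thread" that the
// comments above refer to.
bool WaitForSomeIsolateToEnterJS() {
  int expected = 0;
  if (!state.compare_exchange_strong(expected, -1)) return false;
  sem.acquire();  // released by HandleWakeUp()
  return true;
}

bool IsSomeIsolateInJS() { return state.load() > 0; }

// Slow path taken when an isolate's increment brought state from -1
// back to 0: increment once more to record the isolate itself, then
// release the parked profiler.
void HandleWakeUp() {
  assert(state.load() >= 0);
  state.fetch_add(1);
  sem.release();
}

// Isolate entering JS: a result of 0 means the increment consumed the
// profiler's -1 marker, so the profiler needs waking.
void IsolateEnteredJS() {
  if (state.fetch_add(1) + 1 == 0) HandleWakeUp();
}

int main() {
  IsolateEnteredJS();  // state: 0 -> 1, profiler was not parked
  return !IsSomeIsolateInJS();
}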
425 | 425 |
426 | 426 |
427 void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) { | 427 void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) { |
428 // Do a fake increment. If the profiler is waiting on the semaphore, | 428 // Do a fake increment. If the profiler is waiting on the semaphore, |
429 // the returned state is 0, which can be left as an initial state in | 429 // the returned state is 0, which can be left as an initial state in |
430 // case profiling is restarted later. If the profiler is not | 430 // case profiling is restarted later. If the profiler is not |
431 // waiting, the increment will prevent it from waiting, but has to | 431 // waiting, the increment will prevent it from waiting, but has to |
432 // be undone after the profiler is stopped. | 432 // be undone after the profiler is stopped. |
433 Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1); | 433 Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1); |
434 ASSERT(new_state >= 0); | 434 ASSERT(new_state >= 0); |
435 if (new_state == 0) { | 435 if (new_state == 0) { |
436 // The profiler thread is waiting. Wake it up. It must check for | 436 // The profiler thread is waiting. Wake it up. It must check for |
437 // stop conditions before attempting to wait again. | 437 // stop conditions before attempting to wait again. |
438 semaphore_->Signal(); | 438 semaphore.Pointer()->Signal(); |
439 } | 439 } |
440 thread->Join(); | 440 thread->Join(); |
441 // The profiler thread is now stopped. Undo the increment in case it | 441 // The profiler thread is now stopped. Undo the increment in case it |
442 // was not waiting. | 442 // was not waiting. |
443 if (new_state != 0) { | 443 if (new_state != 0) { |
444 NoBarrier_AtomicIncrement(&state_, -1); | 444 NoBarrier_AtomicIncrement(&state_, -1); |
445 } | 445 } |
446 } | 446 } |
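The shutdown path has to handle two interleavings: the profiler thread may already be parked on the semaphore, or still running and about to park. The fake increment covers both cases in one atomic step. Continuing the sketch above (the globals are restated for self-containment, and std::thread stands in for V8's Thread/Join API):

#include <atomic>
#include <cassert>
#include <semaphore>
#include <thread>

static std::atomic<int> state{0};         // as in the sketch above
static std::counting_semaphore<> sem{0};

// new_state == 0 means the profiler was parked (its -1 marker was
// consumed), so it must be woken to observe the stop request;
// new_state > 0 means it was running, and the extra count keeps it
// from parking until the increment is undone after the join.
void StopProfilerThreadBeforeShutdown(std::thread* profiler) {
  int new_state = state.fetch_add(1) + 1;
  assert(new_state >= 0);
  if (new_state == 0) sem.release();
  profiler->join();  // the profiler checks its stop flag and exits
  if (new_state != 0) state.fetch_add(-1);  // undo the fake increment
}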
447 | 447 |
448 | 448 |
(...skipping 17 matching lines...)
466 | 466 |
467 bool RuntimeProfilerRateLimiter::SuspendIfNecessary() { | 467 bool RuntimeProfilerRateLimiter::SuspendIfNecessary() { |
468 if (!RuntimeProfiler::IsSomeIsolateInJS()) { | 468 if (!RuntimeProfiler::IsSomeIsolateInJS()) { |
469 return RuntimeProfiler::WaitForSomeIsolateToEnterJS(); | 469 return RuntimeProfiler::WaitForSomeIsolateToEnterJS(); |
470 } | 470 } |
471 return false; | 471 return false; |
472 } | 472 } |
473 | 473 |
474 | 474 |
475 } } // namespace v8::internal | 475 } } // namespace v8::internal |