Chromium Code Reviews

| Index: src/optimizing-compiler-thread.cc |
| diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc |
| index bc26442ea2391012e8fd681ca9fafcedc3663c50..3a6286e96261086d3ad9b1e62b6a6cba16f61ac7 100644 |
| --- a/src/optimizing-compiler-thread.cc |
| +++ b/src/optimizing-compiler-thread.cc |
| @@ -68,14 +68,6 @@ void OptimizingCompilerThread::Run() { |
| CompileNext(); |
| - if (!FLAG_manual_parallel_recompilation) { |
| - isolate_->stack_guard()->RequestCodeReadyEvent(); |
| - } else { |
| - // In manual mode, do not trigger a code ready event. |
| - // Instead, wait for the optimized functions to be installed manually. |
| - output_queue_semaphore_->Signal(); |
| - } |
| - |
| if (FLAG_trace_parallel_recompilation) { |
| time_spent_compiling_ += OS::Ticks() - compiling_start; |
| } |
| @@ -89,11 +81,7 @@ void OptimizingCompilerThread::CompileNext() { |
| input_queue_.Dequeue(&optimizing_compiler); |
| Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); |
| - // Function may have been optimized meanwhile by OSR. |
| - if (FLAG_use_osr && |
| - optimizing_compiler->info()->closure()->IsOptimized()) { |
| - return; |
| - } |
| + ASSERT(optimizing_compiler->info()->closure()->IsInRecompileQueue()); |
| OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); |
| ASSERT(status != OptimizingCompiler::FAILED); |
| @@ -101,10 +89,20 @@ void OptimizingCompilerThread::CompileNext() { |
| USE(status); |
| output_queue_.Enqueue(optimizing_compiler); |
| + |
| + // After putting the finished function onto the install queue but before |
| + // marking via builtin, OptimizeNow in the execution thread may have |
| + // already called InstallOptimizedFunctions. |

Jakob Kummerow (2013/03/12 15:19:30):
This comment is misleading. Maybe something like:

Yang (2013/03/12 18:03:38):
Done.

| + |
| + ASSERT(optimizing_compiler->info()->closure()->IsInRecompileQueue()); |
| + // Mark function to generate and install optimized code. We assume this |
| + // write to be atomic. |
| + optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); |
| } |
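
The added comments in CompileNext describe a lock-free handoff: the compiler thread enqueues the finished compilation first and only then flips the per-function marker, so the execution thread must tolerate seeing an entry that is queued but not yet marked. Below is a minimal standalone sketch of that pattern, assuming std::atomic for the marker; all names (FunctionState, Closure, OutputQueue, CompileSide, InstallSide) are hypothetical, and this is an illustrative model, not V8 code.

```cpp
// Illustrative model of the enqueue-then-mark handoff; names are hypothetical.
#include <atomic>
#include <deque>
#include <mutex>

enum class FunctionState {
  kInRecompileQueue,       // still owned by the compiler thread
  kInstallRecompiledCode,  // marked: safe for the execution thread to install
  kOptimized               // installed
};

struct Closure {
  // Stands in for the marker builtin; the store below is assumed atomic,
  // mirroring the "we assume this write to be atomic" comment in the patch.
  std::atomic<FunctionState> state{FunctionState::kInRecompileQueue};
};

class OutputQueue {
 public:
  void Enqueue(Closure* c) {
    std::lock_guard<std::mutex> lock(mu_);
    q_.push_back(c);
  }
  Closure* Peek() {
    std::lock_guard<std::mutex> lock(mu_);
    return q_.empty() ? nullptr : q_.front();
  }
  void Dequeue() {
    std::lock_guard<std::mutex> lock(mu_);
    q_.pop_front();
  }

 private:
  std::mutex mu_;
  std::deque<Closure*> q_;
};

// Compiler thread: enqueue first, then mark. Between the two steps the
// execution thread may already be draining the queue, which is exactly
// the window the patch's comment warns about.
void CompileSide(OutputQueue* out, Closure* c) {
  out->Enqueue(c);
  c->state.store(FunctionState::kInstallRecompiledCode, std::memory_order_release);
}

// Execution thread: install fully marked entries in order; stop at the first
// entry that is still unmarked and retry on a later call.
void InstallSide(OutputQueue* out) {
  while (Closure* c = out->Peek()) {
    if (c->state.load(std::memory_order_acquire) == FunctionState::kInRecompileQueue) {
      break;  // Queued but not yet marked; leave it for the next pass.
    }
    out->Dequeue();
    // Stands in for Compiler::InstallOptimizedCode(compiler).
    c->state.store(FunctionState::kOptimized, std::memory_order_release);
  }
}
```
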
| void OptimizingCompilerThread::Stop() { |
| + ASSERT(!IsOptimizerThread()); |
| Release_Store(&stop_thread_, static_cast<AtomicWord>(true)); |
| input_queue_semaphore_->Signal(); |
| stop_semaphore_->Wait(); |
| @@ -133,45 +131,43 @@ void OptimizingCompilerThread::Stop() { |
| void OptimizingCompilerThread::InstallOptimizedFunctions() { |
| + ASSERT(!IsOptimizerThread()); |
| HandleScope handle_scope(isolate_); |
| int functions_installed = 0; |
| while (!output_queue_.IsEmpty()) { |
| - if (FLAG_manual_parallel_recompilation) { |
| - output_queue_semaphore_->Wait(); |
| + OptimizingCompiler* compiler = *output_queue_.Peek(); |
| + |
| + if (compiler->info()->closure()->IsInRecompileQueue()) { |
| + // A function may already be queued for installing but not yet marked as |
| + // such; it is then still marked as in queue. In that case, stop here and |
| + // continue with the output queue next time to avoid a race condition. |
| + break; |
| } |
| - OptimizingCompiler* compiler = NULL; |
| output_queue_.Dequeue(&compiler); |
| + |
| +#ifdef DEBUG |
| + // Create new closure handle since the deferred handle is about to die. |
| + Handle<JSFunction> closure(*compiler->info()->closure()); |
| +#endif // DEBUG |
| + |
| Compiler::InstallOptimizedCode(compiler); |
| + // Assert that the marker builtin has been replaced by actual code. |
| + ASSERT(!closure->IsInRecompileQueue()); |
| functions_installed++; |
| } |
| - if (FLAG_trace_parallel_recompilation && functions_installed != 0) { |
| - PrintF(" ** Installed %d function(s).\n", functions_installed); |
| - } |
| -} |
| - |
| - |
| -Handle<SharedFunctionInfo> |
| - OptimizingCompilerThread::InstallNextOptimizedFunction() { |
| - ASSERT(FLAG_manual_parallel_recompilation || |
| - FLAG_parallel_recompilation_delay != 0); |
| - output_queue_semaphore_->Wait(); |
| - OptimizingCompiler* compiler = NULL; |
| - output_queue_.Dequeue(&compiler); |
| - // Copy a handle from deferred handle scope to the normal handle scope. |
| - Handle<SharedFunctionInfo> shared(*compiler->info()->shared_info()); |
| - Compiler::InstallOptimizedCode(compiler); |
| - return shared; |
| } |
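
Continuing the illustrative model above (hypothetical names, not V8 API), a small driver runs the two sides on separate threads. The install pass may back off when it sees a queued-but-unmarked entry and simply retries on a later call, much as InstallOptimizedFunctions is invoked again later from the execution thread.

```cpp
// Driver for the sketch above; still an illustrative model only.
#include <thread>

int main() {
  OutputQueue out;
  Closure fn;

  std::thread compiler(CompileSide, &out, &fn);

  // Execution thread: keep running install passes until the function has
  // actually been installed; an early pass may see the entry queued but
  // unmarked and back off, which is the race the patch guards against.
  while (fn.state.load(std::memory_order_acquire) != FunctionState::kOptimized) {
    InstallSide(&out);
    std::this_thread::yield();
  }

  compiler.join();
  return 0;
}
```
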
| void OptimizingCompilerThread::QueueForOptimization( |
| OptimizingCompiler* optimizing_compiler) { |
| ASSERT(IsQueueAvailable()); |
| + ASSERT(!IsOptimizerThread()); |
| Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); |
| input_queue_.Enqueue(optimizing_compiler); |
| input_queue_semaphore_->Signal(); |
| } |
| + |
| #ifdef DEBUG |
| bool OptimizingCompilerThread::IsOptimizerThread() { |
| if (!FLAG_parallel_recompilation) return false; |