OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 62 matching lines...)
73 } | 73 } |
74 } | 74 } |
75 } | 75 } |
76 | 76 |
77 | 77 |
78 void OptimizingCompilerThread::CompileNext() { | 78 void OptimizingCompilerThread::CompileNext() { |
79 OptimizingCompiler* optimizing_compiler = NULL; | 79 OptimizingCompiler* optimizing_compiler = NULL; |
80 input_queue_.Dequeue(&optimizing_compiler); | 80 input_queue_.Dequeue(&optimizing_compiler); |
81 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); | 81 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); |
82 | 82 |
83 ASSERT(optimizing_compiler->info()->closure()->IsInRecompileQueue()); | 83 // The function may have already been optimized by OSR. Simply continue. |
| 84 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); |
| 85 USE(status); // Prevent an unused-variable error in release mode. |
| 86 ASSERT(status != OptimizingCompiler::FAILED); |
84 | 87 |
85 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); | 88 // The function may have already been optimized by OSR. Simply continue. |
86 ASSERT(status != OptimizingCompiler::FAILED); | 89 // Mark it for installing before queuing so that we can be sure of the write |
87 // Prevent an unused-variable error in release mode. | 90 // order: marking first and (after being queued) installing code second. |
88 USE(status); | 91 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); |
89 | |
90 output_queue_.Enqueue(optimizing_compiler); | 92 output_queue_.Enqueue(optimizing_compiler); |
91 | |
92 // The execution thread can call InstallOptimizedFunctions() at any time, | |
93 // including at this point, after queuing for install and before marking | |
94 // for install. To avoid a race condition, functions that are queued but not |
95 // yet marked for install are not processed by InstallOptimizedFunctions(). | |
96 | |
97 ASSERT(optimizing_compiler->info()->closure()->IsInRecompileQueue()); | |
98 // Mark the function to generate and install optimized code. We assume this |
99 // write to be atomic. | |
100 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); | |
101 } | 93 } |
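
The comments in the new version above capture the invariant this change introduces: the closure is marked for install before the compiler job is put on the output queue, so any job the main thread can see in output_queue_ is already marked. Below is a minimal standalone sketch of that mark-before-enqueue pattern, assuming C++11 atomics and a mutex-guarded queue; Job, JobQueue, Publish and DrainAndInstall are hypothetical stand-ins, not V8's types.

// Minimal sketch of the mark-before-enqueue ordering (illustrative
// only; the types and names here are hypothetical, not V8's).
#include <atomic>
#include <cassert>
#include <mutex>
#include <queue>

struct Job {
  std::atomic<bool> marked_for_install{false};
};

// A mutex-guarded queue: the lock release in Enqueue and acquire in
// Dequeue are what make the producer's earlier store visible.
class JobQueue {
 public:
  void Enqueue(Job* job) {
    std::lock_guard<std::mutex> lock(mutex_);
    jobs_.push(job);
  }
  bool Dequeue(Job** out) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (jobs_.empty()) return false;
    *out = jobs_.front();
    jobs_.pop();
    return true;
  }
 private:
  std::mutex mutex_;
  std::queue<Job*> jobs_;
};

JobQueue output_queue;

// Compiler thread: mark first, then publish. Any consumer that
// dequeues the job also observes the mark.
void Publish(Job* job) {
  job->marked_for_install.store(true, std::memory_order_relaxed);
  output_queue.Enqueue(job);
}

// Main thread: everything dequeued is already marked, so it can be
// installed immediately; no peek-and-bail check is required.
void DrainAndInstall() {
  Job* job = nullptr;
  while (output_queue.Dequeue(&job)) {
    assert(job->marked_for_install.load(std::memory_order_relaxed));
    // ... install optimized code for *job ...
  }
}
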
102 | 94 |
103 | 95 |
104 void OptimizingCompilerThread::Stop() { | 96 void OptimizingCompilerThread::Stop() { |
105 ASSERT(!IsOptimizerThread()); | 97 ASSERT(!IsOptimizerThread()); |
106 Release_Store(&stop_thread_, static_cast<AtomicWord>(true)); | 98 Release_Store(&stop_thread_, static_cast<AtomicWord>(true)); |
107 input_queue_semaphore_->Signal(); | 99 input_queue_semaphore_->Signal(); |
108 stop_semaphore_->Wait(); | 100 stop_semaphore_->Wait(); |
109 | 101 |
110 if (FLAG_parallel_recompilation_delay != 0) { | 102 if (FLAG_parallel_recompilation_delay != 0) { |
111 // Execution ended before we managed to compile and install the remaining | |
112 // functions in the queue. We still want to do that for debugging though. | |
113 // At this point the optimizing thread has already stopped, so we finish |
114 // processing the queue in the main thread. | |
115 InstallOptimizedFunctions(); | 103 InstallOptimizedFunctions(); |
116 // A barrier when loading the queue length is not necessary since the | 104 // A barrier when loading the queue length is not necessary since the |
117 // write happens in CompileNext on the same thread. | 105 // write happens in CompileNext on the same thread. |
118 while (NoBarrier_Load(&queue_length_) > 0) { | 106 while (NoBarrier_Load(&queue_length_) > 0) { |
119 CompileNext(); | 107 CompileNext(); |
120 InstallOptimizedFunctions(); | 108 InstallOptimizedFunctions(); |
121 } | 109 } |
122 } | 110 } |
123 | 111 |
124 if (FLAG_trace_parallel_recompilation) { | 112 if (FLAG_trace_parallel_recompilation) { |
125 double compile_time = static_cast<double>(time_spent_compiling_); | 113 double compile_time = static_cast<double>(time_spent_compiling_); |
126 double total_time = static_cast<double>(time_spent_total_); | 114 double total_time = static_cast<double>(time_spent_total_); |
127 double percentage = (compile_time * 100) / total_time; | 115 double percentage = (compile_time * 100) / total_time; |
128 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); | 116 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); |
129 } | 117 } |
130 } | 118 } |
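
Stop() is a two-semaphore shutdown handshake: release-store the stop flag, signal the input semaphore so a worker blocked on it wakes up and sees the flag, then wait on stop_semaphore_ until the worker acknowledges. The drain loop that follows may read queue_length_ without a barrier because, once the worker has stopped, the decrements in CompileNext happen on this same thread. A rough portable equivalent of the handshake, with std::condition_variable and std::thread::join standing in for V8's semaphores (an assumed mapping, for illustration only):

// Sketch of the Stop() handshake using C++11 primitives.
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

std::atomic<bool> stop_thread{false};
std::mutex m;
std::condition_variable input_cv;  // stands in for input_queue_semaphore_
bool input_signaled = false;

void WorkerLoop() {
  for (;;) {
    {
      std::unique_lock<std::mutex> lock(m);
      input_cv.wait(lock, [] { return input_signaled; });
      input_signaled = false;
    }
    if (stop_thread.load(std::memory_order_acquire)) break;  // saw Stop()
    // ... CompileNext() would run here ...
  }
  // V8 signals stop_semaphore_ here; joining the thread below gives
  // the caller the same wait-until-acknowledged guarantee.
}

void Stop(std::thread& worker) {
  stop_thread.store(true, std::memory_order_release);  // Release_Store
  {
    std::lock_guard<std::mutex> lock(m);
    input_signaled = true;  // input_queue_semaphore_->Signal()
  }
  input_cv.notify_one();
  worker.join();  // stop_semaphore_->Wait()
}
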
131 | 119 |
132 | 120 |
133 void OptimizingCompilerThread::InstallOptimizedFunctions() { | 121 void OptimizingCompilerThread::InstallOptimizedFunctions() { |
134 ASSERT(!IsOptimizerThread()); | 122 ASSERT(!IsOptimizerThread()); |
135 HandleScope handle_scope(isolate_); | 123 HandleScope handle_scope(isolate_); |
136 int functions_installed = 0; | 124 int functions_installed = 0; |
137 while (!output_queue_.IsEmpty()) { | 125 while (!output_queue_.IsEmpty()) { |
138 OptimizingCompiler* compiler = *output_queue_.Peek(); | 126 OptimizingCompiler* compiler; |
139 | |
140 if (compiler->info()->closure()->IsInRecompileQueue()) { | |
141 // A function may be queued for install, but not marked as such yet. | |
142 // We continue with the output queue the next time to avoid a race condition. |
143 break; | |
144 } | |
145 output_queue_.Dequeue(&compiler); | 127 output_queue_.Dequeue(&compiler); |
146 | |
147 #ifdef DEBUG | |
148 // Create a new closure handle since the deferred handle is about to die. |
149 Handle<JSFunction> closure(*compiler->info()->closure()); | |
150 #endif // DEBUG | |
151 | |
152 Compiler::InstallOptimizedCode(compiler); | 128 Compiler::InstallOptimizedCode(compiler); |
153 // Assert that the marker builtin has been replaced by actual code. | |
154 ASSERT(!closure->IsInRecompileQueue()); | |
155 functions_installed++; | 129 functions_installed++; |
156 } | 130 } |
157 } | 131 } |
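
For contrast, the deleted lines above show what the old ordering (enqueue first, mark afterwards) cost the consumer: the head of the output queue could become visible before its mark did, so the loop had to peek and bail out rather than dequeue unconditionally. A sketch of that defensive pattern, with hypothetical Job and queue types; the !marked_for_install test plays the role of the deleted IsInRecompileQueue() check:

// Sketch of the defensive consumer the old ordering required
// (illustrative only; names are hypothetical).
#include <atomic>
#include <deque>
#include <mutex>

struct Job {
  std::atomic<bool> marked_for_install{false};
};

std::mutex queue_mutex;
std::deque<Job*> output_queue;

void DrainAndInstallOld() {
  for (;;) {
    std::lock_guard<std::mutex> lock(queue_mutex);
    if (output_queue.empty()) return;
    Job* job = output_queue.front();  // peek, do not pop yet
    if (!job->marked_for_install.load()) {
      // Queued but not yet marked: stop here and try again on the
      // next call, like the deleted IsInRecompileQueue() break above.
      return;
    }
    output_queue.pop_front();
    // ... install optimized code for *job ...
  }
}
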
158 | 132 |
159 | 133 |
160 void OptimizingCompilerThread::QueueForOptimization( | 134 void OptimizingCompilerThread::QueueForOptimization( |
161 OptimizingCompiler* optimizing_compiler) { | 135 OptimizingCompiler* optimizing_compiler) { |
162 ASSERT(IsQueueAvailable()); | 136 ASSERT(IsQueueAvailable()); |
163 ASSERT(!IsOptimizerThread()); | 137 ASSERT(!IsOptimizerThread()); |
164 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); | 138 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); |
| 139 optimizing_compiler->info()->closure()->MarkInRecompileQueue(); |
165 input_queue_.Enqueue(optimizing_compiler); | 140 input_queue_.Enqueue(optimizing_compiler); |
166 input_queue_semaphore_->Signal(); | 141 input_queue_semaphore_->Signal(); |
167 } | 142 } |
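
QueueForOptimization applies the same discipline on the producer side: the closure is marked as queued (MarkInRecompileQueue) before it is enqueued. Note also the queue_length_ bookkeeping: it is incremented here before Enqueue and decremented in CompileNext after Dequeue, so it may over-count momentarily but never under-counts, which keeps IsQueueAvailable() conservative. A short sketch of that counter pattern; kMaxQueueLength and the helper names are hypothetical:

// Sketch of the queue-length bookkeeping (illustrative only).
#include <atomic>

constexpr int kMaxQueueLength = 8;  // assumed capacity, not a V8 flag
std::atomic<int> queue_length{0};

// Because the increment precedes Enqueue and the decrement follows
// Dequeue, the counter is always >= the real queue occupancy, so
// this availability check can only err on the safe side.
bool IsQueueAvailable() {
  return queue_length.load() < kMaxQueueLength;
}

void OnQueueForOptimization() { queue_length.fetch_add(1); }  // before Enqueue
void OnCompileNext()          { queue_length.fetch_sub(1); }  // after Dequeue
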
168 | 143 |
169 | 144 |
170 #ifdef DEBUG | 145 #ifdef DEBUG |
171 bool OptimizingCompilerThread::IsOptimizerThread() { | 146 bool OptimizingCompilerThread::IsOptimizerThread() { |
172 if (!FLAG_parallel_recompilation) return false; | 147 if (!FLAG_parallel_recompilation) return false; |
173 return ThreadId::Current().ToInteger() == thread_id_; | 148 return ThreadId::Current().ToInteger() == thread_id_; |
174 } | 149 } |
175 #endif | 150 #endif |
176 | 151 |
177 | 152 |
178 } } // namespace v8::internal | 153 } } // namespace v8::internal |