OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 50 matching lines...) | |
61 time_spent_total_ = OS::Ticks() - epoch; | 61 time_spent_total_ = OS::Ticks() - epoch; |
62 } | 62 } |
63 return; | 63 return; |
64 } | 64 } |
65 | 65 |
66 int64_t compiling_start = 0; | 66 int64_t compiling_start = 0; |
67 if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks(); | 67 if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks(); |
68 | 68 |
69 CompileNext(); | 69 CompileNext(); |
70 | 70 |
71 if (!FLAG_manual_parallel_recompilation) { | |
72 isolate_->stack_guard()->RequestCodeReadyEvent(); | |
73 } else { | |
74 // In manual mode, do not trigger a code ready event. | |
75 // Instead, wait for the optimized functions to be installed manually. | |
76 output_queue_semaphore_->Signal(); | |
77 } | |
78 | |
79 if (FLAG_trace_parallel_recompilation) { | 71 if (FLAG_trace_parallel_recompilation) { |
80 time_spent_compiling_ += OS::Ticks() - compiling_start; | 72 time_spent_compiling_ += OS::Ticks() - compiling_start; |
81 } | 73 } |
82 } | 74 } |
83 } | 75 } |
84 | 76 |
85 | 77 |
86 void OptimizingCompilerThread::CompileNext() { | 78 void OptimizingCompilerThread::CompileNext() { |
87 Heap::RelocationLock relocation_lock(isolate_->heap()); | 79 Heap::RelocationLock relocation_lock(isolate_->heap()); |
88 OptimizingCompiler* optimizing_compiler = NULL; | 80 OptimizingCompiler* optimizing_compiler = NULL; |
89 input_queue_.Dequeue(&optimizing_compiler); | 81 input_queue_.Dequeue(&optimizing_compiler); |
90 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); | 82 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); |
91 | 83 |
92 // Function may have been optimized meanwhile by OSR. | 84 ASSERT(optimizing_compiler->info()->closure()->IsInRecompileQueue()); |
93 if (FLAG_use_osr && | |
94 optimizing_compiler->info()->closure()->IsOptimized()) { | |
95 return; | |
96 } | |
97 | 85 |
98 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); | 86 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); |
99 ASSERT(status != OptimizingCompiler::FAILED); | 87 ASSERT(status != OptimizingCompiler::FAILED); |
100 // Prevent an unused-variable error in release mode. | 88 // Prevent an unused-variable error in release mode. |
101 USE(status); | 89 USE(status); |
102 | 90 |
103 output_queue_.Enqueue(optimizing_compiler); | 91 output_queue_.Enqueue(optimizing_compiler); |
92 | |
93 // After putting the finished function onto the install queue but before | |
Jakob Kummerow
2013/03/12 15:19:30
This comment is misleading. Maybe something like:
Yang
2013/03/12 18:03:38
Done.
| |
94 // marking via builtin, OptimizeNow in the execution thread may have | |
95 // already called InstallOptimizedFunctions. | |
96 | |
97 ASSERT(optimizing_compiler->info()->closure()->IsInRecompileQueue()); | |
98 // Mark function to generate and install optimized code. We assume this | |
99 // write to be atomic. | |
100 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); | |
104 } | 101 } |
105 | 102 |
106 | 103 |
107 void OptimizingCompilerThread::Stop() { | 104 void OptimizingCompilerThread::Stop() { |
105 ASSERT(!IsOptimizerThread()); | |
108 Release_Store(&stop_thread_, static_cast<AtomicWord>(true)); | 106 Release_Store(&stop_thread_, static_cast<AtomicWord>(true)); |
109 input_queue_semaphore_->Signal(); | 107 input_queue_semaphore_->Signal(); |
110 stop_semaphore_->Wait(); | 108 stop_semaphore_->Wait(); |
111 | 109 |
112 if (FLAG_parallel_recompilation_delay != 0) { | 110 if (FLAG_parallel_recompilation_delay != 0) { |
113 // Execution ended before we managed to compile and install the remaining | 111 // Execution ended before we managed to compile and install the remaining |
114 // functions in the queue. We still want to do that for debugging though. | 112 // functions in the queue. We still want to do that for debugging though. |
115 // At this point the optimizing thread already stopped, so we finish | 113 // At this point the optimizing thread already stopped, so we finish |
116 // processing the queue in the main thread. | 114 // processing the queue in the main thread. |
117 InstallOptimizedFunctions(); | 115 InstallOptimizedFunctions(); |
118 // Barrier when loading queue length is not necessary since the write | 116 // Barrier when loading queue length is not necessary since the write |
119 // happens in CompileNext on the same thread. | 117 // happens in CompileNext on the same thread. |
120 while (NoBarrier_Load(&queue_length_) > 0) { | 118 while (NoBarrier_Load(&queue_length_) > 0) { |
121 CompileNext(); | 119 CompileNext(); |
122 InstallOptimizedFunctions(); | 120 InstallOptimizedFunctions(); |
123 } | 121 } |
124 } | 122 } |
125 | 123 |
126 if (FLAG_trace_parallel_recompilation) { | 124 if (FLAG_trace_parallel_recompilation) { |
127 double compile_time = static_cast<double>(time_spent_compiling_); | 125 double compile_time = static_cast<double>(time_spent_compiling_); |
128 double total_time = static_cast<double>(time_spent_total_); | 126 double total_time = static_cast<double>(time_spent_total_); |
129 double percentage = (compile_time * 100) / total_time; | 127 double percentage = (compile_time * 100) / total_time; |
130 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); | 128 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); |
131 } | 129 } |
132 } | 130 } |
133 | 131 |
134 | 132 |
135 void OptimizingCompilerThread::InstallOptimizedFunctions() { | 133 void OptimizingCompilerThread::InstallOptimizedFunctions() { |
134 ASSERT(!IsOptimizerThread()); | |
136 HandleScope handle_scope(isolate_); | 135 HandleScope handle_scope(isolate_); |
137 int functions_installed = 0; | 136 int functions_installed = 0; |
138 while (!output_queue_.IsEmpty()) { | 137 while (!output_queue_.IsEmpty()) { |
139 if (FLAG_manual_parallel_recompilation) { | 138 OptimizingCompiler* compiler = *output_queue_.Peek(); |
140 output_queue_semaphore_->Wait(); | 139 |
140 if (compiler->info()->closure()->IsInRecompileQueue()) { | |
141       // A function may have been enqueued for installing but not yet been | 
142       // marked as such; its closure is then still in the recompile queue. | 
143       // Continue with the output queue next time to avoid a race condition. | 
144 break; | |
141 } | 145 } |
142 OptimizingCompiler* compiler = NULL; | |
143 output_queue_.Dequeue(&compiler); | 146 output_queue_.Dequeue(&compiler); |
147 | |
148 #ifdef DEBUG | |
149 // Create new closure handle since the deferred handle is about to die. | |
150 Handle<JSFunction> closure(*compiler->info()->closure()); | |
151 #endif // DEBUG | |
152 | |
144 Compiler::InstallOptimizedCode(compiler); | 153 Compiler::InstallOptimizedCode(compiler); |
154 // Assert that the marker builtin has been replaced by actual code. | |
155 ASSERT(!closure->IsInRecompileQueue()); | |
145 functions_installed++; | 156 functions_installed++; |
146 } | 157 } |
147 if (FLAG_trace_parallel_recompilation && functions_installed != 0) { | |
148 PrintF(" ** Installed %d function(s).\n", functions_installed); | |
149 } | |
150 } | |
151 | |
152 | |
153 Handle<SharedFunctionInfo> | |
154 OptimizingCompilerThread::InstallNextOptimizedFunction() { | |
155 ASSERT(FLAG_manual_parallel_recompilation || | |
156 FLAG_parallel_recompilation_delay != 0); | |
157 output_queue_semaphore_->Wait(); | |
158 OptimizingCompiler* compiler = NULL; | |
159 output_queue_.Dequeue(&compiler); | |
160 // Copy a handle from deferred handle scope to the normal handle scope. | |
161 Handle<SharedFunctionInfo> shared(*compiler->info()->shared_info()); | |
162 Compiler::InstallOptimizedCode(compiler); | |
163 return shared; | |
164 } | 158 } |
165 | 159 |
166 | 160 |
167 void OptimizingCompilerThread::QueueForOptimization( | 161 void OptimizingCompilerThread::QueueForOptimization( |
168 OptimizingCompiler* optimizing_compiler) { | 162 OptimizingCompiler* optimizing_compiler) { |
169 ASSERT(IsQueueAvailable()); | 163 ASSERT(IsQueueAvailable()); |
164 ASSERT(!IsOptimizerThread()); | |
170 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); | 165 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); |
171 input_queue_.Enqueue(optimizing_compiler); | 166 input_queue_.Enqueue(optimizing_compiler); |
172 input_queue_semaphore_->Signal(); | 167 input_queue_semaphore_->Signal(); |
173 } | 168 } |
174 | 169 |
170 | |
175 #ifdef DEBUG | 171 #ifdef DEBUG |
176 bool OptimizingCompilerThread::IsOptimizerThread() { | 172 bool OptimizingCompilerThread::IsOptimizerThread() { |
177 if (!FLAG_parallel_recompilation) return false; | 173 if (!FLAG_parallel_recompilation) return false; |
178 return ThreadId::Current().ToInteger() == thread_id_; | 174 return ThreadId::Current().ToInteger() == thread_id_; |
179 } | 175 } |
180 #endif | 176 #endif |
181 | 177 |
182 | 178 |
183 } } // namespace v8::internal | 179 } } // namespace v8::internal |
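
For readers tracing the race that the new IsInRecompileQueue() check and the enqueue-then-mark order guard against, here is a minimal standalone sketch of the handshake. It is not V8 code: Job, State, output_queue, and queue_mutex are stand-ins invented for illustration, and std::atomic stands in for the atomic write and builtin marker that the real patch relies on.

```cpp
// Simplified model (not V8 code). One optimizer thread hands finished jobs
// to the main thread. A job's State mirrors the closure marks in the patch:
// it stays IN_QUEUE until the optimizer thread has enqueued the job on the
// output queue AND marked it READY_TO_INSTALL, in that order.
#include <atomic>
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>
#include <vector>

enum class State { IN_QUEUE, READY_TO_INSTALL, INSTALLED };

struct Job {
  int id = 0;
  std::atomic<State> state{State::IN_QUEUE};
};

std::deque<Job*> output_queue;
std::mutex queue_mutex;

// Optimizer thread: enqueue the finished job first, then mark it ready.
void CompileNext(Job* job) {
  {
    std::lock_guard<std::mutex> lock(queue_mutex);
    output_queue.push_back(job);
  }
  // The main thread may already see the job in the queue at this point,
  // but it must not install it before this store becomes visible.
  job->state.store(State::READY_TO_INSTALL, std::memory_order_release);
}

// Main thread: install finished jobs, but stop at the first job that is in
// the queue without having been marked ready yet (the analogue of the
// IsInRecompileQueue() check followed by `break`).
void InstallOptimizedFunctions() {
  std::lock_guard<std::mutex> lock(queue_mutex);
  while (!output_queue.empty()) {
    Job* job = output_queue.front();
    if (job->state.load(std::memory_order_acquire) == State::IN_QUEUE) {
      break;  // Enqueued but not marked yet; retry on the next call.
    }
    output_queue.pop_front();
    job->state.store(State::INSTALLED, std::memory_order_relaxed);
    std::printf("installed job %d\n", job->id);
  }
}

int main() {
  std::vector<Job> jobs(4);
  for (int i = 0; i < 4; ++i) jobs[i].id = i;

  std::thread optimizer([&jobs] {
    for (Job& job : jobs) CompileNext(&job);
  });

  // Poll a few times while the optimizer runs, then drain the rest.
  for (int i = 0; i < 100; ++i) InstallOptimizedFunctions();
  optimizer.join();
  InstallOptimizedFunctions();
  return 0;
}
```

The break (rather than a wait) mirrors the patch: the main thread never blocks on the compiler thread, and a job that was enqueued but not yet marked is simply picked up on a later call to InstallOptimizedFunctions.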