Chromium Code Reviews

Unified Diff: src/optimizing-compiler-thread.cc

Issue 12488006: Parallel recompilation: remove interrupt for code generation. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: addressed comments (created 7 years, 9 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(... skipping 50 matching lines ...)
         time_spent_total_ = OS::Ticks() - epoch;
       }
       return;
     }
 
     int64_t compiling_start = 0;
     if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();
 
     CompileNext();
 
-    if (!FLAG_manual_parallel_recompilation) {
-      isolate_->stack_guard()->RequestCodeReadyEvent();
-    } else {
-      // In manual mode, do not trigger a code ready event.
-      // Instead, wait for the optimized functions to be installed manually.
-      output_queue_semaphore_->Signal();
-    }
-
     if (FLAG_trace_parallel_recompilation) {
       time_spent_compiling_ += OS::Ticks() - compiling_start;
     }
   }
 }
 
 
 void OptimizingCompilerThread::CompileNext() {
   Heap::RelocationLock relocation_lock(isolate_->heap());
   OptimizingCompiler* optimizing_compiler = NULL;
   input_queue_.Dequeue(&optimizing_compiler);
   Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
 
-  // Function may have been optimized meanwhile by OSR.
-  if (FLAG_use_osr &&
-      optimizing_compiler->info()->closure()->IsOptimized()) {
-    return;
-  }
+  ASSERT(optimizing_compiler->info()->closure()->IsInRecompileQueue());
 
   OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
   ASSERT(status != OptimizingCompiler::FAILED);
   // Prevent an unused-variable error in release mode.
   USE(status);
 
   output_queue_.Enqueue(optimizing_compiler);
+
+  // The execution thread can call InstallOptimizedFunctions() at any time,
+  // including at this point, after queuing for install and before marking
+  // for install. To avoid a race condition, functions that are queued but
+  // not yet marked for install are not processed by
+  // InstallOptimizedFunctions().
+
+  ASSERT(optimizing_compiler->info()->closure()->IsInRecompileQueue());
+  // Mark the function to generate and install optimized code. We assume
+  // this write is atomic.
+  optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
 }
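
To make the handoff described by the new comments in CompileNext() concrete, here is a minimal standalone sketch of the ordering, assuming a mutex-protected queue and a per-job atomic flag in place of V8's output_queue_ and the closure's marker builtin. All names below (RecompileJob, FinishCompilation, ready_to_install) are invented for illustration; this is not V8 code. The point is that the compiler thread enqueues first and sets the "ready to install" mark last, so a job that is visible in the queue but not yet marked is simply not installed yet.

#include <atomic>
#include <deque>
#include <mutex>

// Stand-in for an optimization job; the flag models the closure being
// marked for installing recompiled code.
struct RecompileJob {
  std::atomic<bool> ready_to_install{false};
  // ... compilation output would live here ...
};

std::mutex queue_mutex;
std::deque<RecompileJob*> output_queue;  // stand-in for output_queue_

// Runs on the compiler thread; models the tail of CompileNext().
void FinishCompilation(RecompileJob* job) {
  {
    std::lock_guard<std::mutex> lock(queue_mutex);
    output_queue.push_back(job);  // 1. queue for install
  }
  // The execution thread may already see the job in the queue at this point,
  // but it will not install it until the mark below becomes visible.
  job->ready_to_install.store(true, std::memory_order_release);  // 2. mark last
}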
 
 
 void OptimizingCompilerThread::Stop() {
+  ASSERT(!IsOptimizerThread());
   Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
   input_queue_semaphore_->Signal();
   stop_semaphore_->Wait();
 
   if (FLAG_parallel_recompilation_delay != 0) {
     // Execution ended before we managed to compile and install the remaining
     // functions in the queue. We still want to do that for debugging though.
     // At this point the optimizing thread already stopped, so we finish
     // processing the queue in the main thread.
     InstallOptimizedFunctions();
     // Barrier when loading queue length is not necessary since the write
     // happens in CompileNext on the same thread.
     while (NoBarrier_Load(&queue_length_) > 0) {
       CompileNext();
       InstallOptimizedFunctions();
     }
   }
 
   if (FLAG_trace_parallel_recompilation) {
     double compile_time = static_cast<double>(time_spent_compiling_);
     double total_time = static_cast<double>(time_spent_total_);
     double percentage = (compile_time * 100) / total_time;
     PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
   }
 }
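
A note on the NoBarrier_Load in Stop() above: by the time stop_semaphore_->Wait() has returned, the compiler thread is done, and in the drain loop both the decrement of queue_length_ (inside CompileNext()) and the load happen on the main thread, so no memory barrier is needed. Below is a small standalone illustration of that reasoning using std::atomic rather than the V8 atomics API, with the join standing in for the semaphore wait; the names are invented for illustration.

#include <atomic>
#include <thread>

std::atomic<int> queue_length{0};

// Models CompileNext() being run by the main thread during shutdown.
void CompileOne() {
  // ... compile one queued function ...
  queue_length.fetch_sub(1, std::memory_order_relaxed);
}

int main() {
  std::thread compiler_thread([] {
    queue_length.fetch_add(3, std::memory_order_relaxed);  // leftover work
  });
  compiler_thread.join();  // like stop_semaphore_->Wait(): the worker is done
  // From here on, only the main thread reads and writes queue_length, so
  // relaxed ("no barrier") accesses are sufficient.
  while (queue_length.load(std::memory_order_relaxed) > 0) {
    CompileOne();
  }
  return 0;
}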
 
 
 void OptimizingCompilerThread::InstallOptimizedFunctions() {
+  ASSERT(!IsOptimizerThread());
   HandleScope handle_scope(isolate_);
   int functions_installed = 0;
   while (!output_queue_.IsEmpty()) {
-    if (FLAG_manual_parallel_recompilation) {
-      output_queue_semaphore_->Wait();
-    }
-    OptimizingCompiler* compiler = NULL;
+    OptimizingCompiler* compiler = *output_queue_.Peek();
+
+    if (compiler->info()->closure()->IsInRecompileQueue()) {
+      // A function may be queued for install, but not marked as such yet.
+      // We continue with the rest of the output queue on the next call, to
+      // avoid a race condition.
+      break;
+    }
     output_queue_.Dequeue(&compiler);
+
+#ifdef DEBUG
+    // Create a new closure handle, since the deferred handle is about to die.
+    Handle<JSFunction> closure(*compiler->info()->closure());
+#endif  // DEBUG
+
     Compiler::InstallOptimizedCode(compiler);
+    // Assert that the marker builtin has been replaced by actual code.
+    ASSERT(!closure->IsInRecompileQueue());
     functions_installed++;
   }
-  if (FLAG_trace_parallel_recompilation && functions_installed != 0) {
-    PrintF("  ** Installed %d function(s).\n", functions_installed);
-  }
-}
-
-
-Handle<SharedFunctionInfo>
-    OptimizingCompilerThread::InstallNextOptimizedFunction() {
-  ASSERT(FLAG_manual_parallel_recompilation ||
-         FLAG_parallel_recompilation_delay != 0);
-  output_queue_semaphore_->Wait();
-  OptimizingCompiler* compiler = NULL;
-  output_queue_.Dequeue(&compiler);
-  // Copy a handle from deferred handle scope to the normal handle scope.
-  Handle<SharedFunctionInfo> shared(*compiler->info()->shared_info());
-  Compiler::InstallOptimizedCode(compiler);
-  return shared;
 }
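
The installer side of the same sketch: the execution thread only peeks at the head of the output queue and stops as soon as it finds a job that has been queued but not yet marked, leaving it for a later call, which mirrors the IsInRecompileQueue() check in InstallOptimizedFunctions() above. As before, the types and names (RecompileJob, InstallReadyJobs, InstallCode) are simplified stand-ins, not the actual V8 classes.

#include <atomic>
#include <deque>
#include <mutex>

struct RecompileJob {
  std::atomic<bool> ready_to_install{false};
};

std::mutex queue_mutex;
std::deque<RecompileJob*> output_queue;

// Stand-in for Compiler::InstallOptimizedCode().
void InstallCode(RecompileJob*) { /* swap in the optimized code */ }

// Runs on the execution (main) thread; models InstallOptimizedFunctions().
void InstallReadyJobs() {
  while (true) {
    RecompileJob* job = nullptr;
    {
      std::lock_guard<std::mutex> lock(queue_mutex);
      if (output_queue.empty()) return;
      job = output_queue.front();
      // Queued but not yet marked for install: bail out and retry on the
      // next call instead of racing with the compiler thread.
      if (!job->ready_to_install.load(std::memory_order_acquire)) return;
      output_queue.pop_front();
    }
    InstallCode(job);
  }
}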
 
 
 void OptimizingCompilerThread::QueueForOptimization(
     OptimizingCompiler* optimizing_compiler) {
   ASSERT(IsQueueAvailable());
+  ASSERT(!IsOptimizerThread());
   Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
   input_queue_.Enqueue(optimizing_compiler);
   input_queue_semaphore_->Signal();
 }
 
+
 #ifdef DEBUG
 bool OptimizingCompilerThread::IsOptimizerThread() {
   if (!FLAG_parallel_recompilation) return false;
   return ThreadId::Current().ToInteger() == thread_id_;
 }
 #endif
 
 
 } }  // namespace v8::internal
