Chromium Code Reviews

Side by Side Diff: content/browser/speech/speech_recognizer_impl.cc

Issue 16294003: Update content/ to use scoped_refptr<T>::get() rather than implicit "operator T*" (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Rebased (created 7 years, 6 months ago)
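
For context, the pattern this CL migrates can be sketched as follows: at the time, scoped_refptr<T> exposed an implicit conversion to T* (the "operator T*" named in the issue title), and this change series moves callers to the explicit get() accessor instead. The Foo class below is a hypothetical refcounted type used only for illustration; it is not code from this CL.

// Minimal sketch of the migration pattern; Foo is hypothetical.
#include "base/memory/ref_counted.h"

class Foo : public base::RefCountedThreadSafe<Foo> {
 public:
  void Bar() {}

 private:
  friend class base::RefCountedThreadSafe<Foo>;
  ~Foo() {}
};

void Example() {
  scoped_refptr<Foo> foo(new Foo());
  Foo* old_style = foo;        // old style: relies on the implicit "operator T*"
  Foo* new_style = foo.get();  // new style: explicit get(), as adopted below
  old_style->Bar();
  new_style->Bar();
}

Boolean tests presumably go through the same implicit conversion, which would explain why the destructor chunk below also changes if (audio_controller_) to if (audio_controller_.get()).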
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/speech/speech_recognizer_impl.h"

#include "base/basictypes.h"
#include "base/bind.h"
#include "base/time.h"
#include "content/browser/browser_main_loop.h"
(...skipping 133 matching lines...)
  return is_capturing_audio;
}

const SpeechRecognitionEngine&
SpeechRecognizerImpl::recognition_engine() const {
  return *(recognition_engine_.get());
}

SpeechRecognizerImpl::~SpeechRecognizerImpl() {
  endpointer_.EndSession();
-  if (audio_controller_) {
-    audio_controller_->Close(base::Bind(&KeepAudioControllerRefcountedForDtor,
-                                        audio_controller_));
+  if (audio_controller_.get()) {
+    audio_controller_->Close(
+        base::Bind(&KeepAudioControllerRefcountedForDtor, audio_controller_));
  }
}

// Invoked in the audio thread.
void SpeechRecognizerImpl::OnError(AudioInputController* controller) {
  FSMEventArgs event_args(EVENT_AUDIO_ERROR);
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
}
(...skipping 51 matching lines...)
  // Event dispatching must be sequential, otherwise it will break all the rules
  // and the assumptions of the finite state automata model.
  DCHECK(!is_dispatching_event_);
  is_dispatching_event_ = true;

  // Guard against the delegate freeing us until we finish processing the event.
  scoped_refptr<SpeechRecognizerImpl> me(this);

  if (event_args.event == EVENT_AUDIO_DATA) {
    DCHECK(event_args.audio_data.get() != NULL);
-    ProcessAudioPipeline(*event_args.audio_data);
+    ProcessAudioPipeline(*event_args.audio_data.get());
  }

  // The audio pipeline must be processed before the event dispatch, otherwise
  // it would take actions according to the future state instead of the current.
  state_ = ExecuteTransitionAndGetNextState(event_args);
  is_dispatching_event_ = false;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::ExecuteTransitionAndGetNextState(
(...skipping 193 matching lines...)
SpeechRecognizerImpl::StartRecognitionEngine(const FSMEventArgs& event_args) {
  // This is the first audio packet captured, so the recognition engine is
  // started and the delegate notified about the event.
  DCHECK(recognition_engine_.get() != NULL);
  recognition_engine_->StartRecognition();
  listener_->OnAudioStart(session_id_);

  // This is a little hack, since TakeAudioChunk() is already called by
  // ProcessAudioPipeline(). It is the best tradeoff, unless we allow dropping
  // the first audio chunk captured after opening the audio device.
-  recognition_engine_->TakeAudioChunk(*(event_args.audio_data));
+  recognition_engine_->TakeAudioChunk(*(event_args.audio_data.get()));
  return STATE_ESTIMATING_ENVIRONMENT;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::WaitEnvironmentEstimationCompletion(const FSMEventArgs&) {
  DCHECK(endpointer_.IsEstimatingEnvironment());
  if (GetElapsedTimeMs() >= kEndpointerEstimationTimeMs) {
    endpointer_.SetUserInputMode();
    listener_->OnEnvironmentEstimationComplete(session_id_);
    return STATE_WAITING_FOR_SPEECH;
(...skipping 206 matching lines...)
SpeechRecognizerImpl::FSMEventArgs::FSMEventArgs(FSMEvent event_value)
    : event(event_value),
      audio_data(NULL),
      engine_error(SPEECH_RECOGNITION_ERROR_NONE) {
}

SpeechRecognizerImpl::FSMEventArgs::~FSMEventArgs() {
}

}  // namespace content
