// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/speech_recognition_dispatcher.h"

#include "base/basictypes.h"
#include "base/utf_string_conversions.h"
#include "content/common/speech_recognition_messages.h"
#include "content/renderer/render_view_impl.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebString.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebVector.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechGrammar.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionParams.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionResult.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognizer.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognizerClient.h"

using content::SpeechRecognitionError;
using content::SpeechRecognitionResult;
using WebKit::WebVector;
using WebKit::WebString;
using WebKit::WebSpeechGrammar;
using WebKit::WebSpeechRecognitionHandle;
using WebKit::WebSpeechRecognitionResult;
using WebKit::WebSpeechRecognitionParams;
using WebKit::WebSpeechRecognizerClient;

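// Renderer-side dispatcher bridging WebKit's WebSpeechRecognizer calls and
// the browser-side speech recognition service via IPC messages.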
SpeechRecognitionDispatcher::SpeechRecognitionDispatcher(
    RenderViewImpl* render_view)
    : content::RenderViewObserver(render_view),
      recognizer_client_(NULL),
      last_mapping_id_(0) {
}

SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() {
}

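// Dispatches incoming speech recognition IPC messages to the corresponding
// event handler. Returns true if the message was handled.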
bool SpeechRecognitionDispatcher::OnMessageReceived(
    const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(SpeechRecognitionDispatcher, message)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Started, OnRecognitionStarted)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioStarted, OnAudioStarted)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundStarted, OnSoundStarted)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundEnded, OnSoundEnded)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioEnded, OnAudioEnded)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred, OnErrorOccurred)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved,
                        OnResultRetrieved)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

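// Starts a recognition session: packs the WebKit grammars, language and
// continuous flag into the StartRequest params and sends them to the browser
// process, identifying the session by the integer ID mapped from |handle|.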
void SpeechRecognitionDispatcher::start(
    const WebSpeechRecognitionHandle& handle,
    const WebSpeechRecognitionParams& params,
    WebSpeechRecognizerClient* recognizer_client) {
  // TODO(primiano) What to do if a start is issued to an already started
  // object?
  DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client);
  recognizer_client_ = recognizer_client;

  SpeechRecognitionHostMsg_StartRequest_Params msg_params;
  for (size_t i = 0; i < params.grammars().size(); ++i) {
    const WebSpeechGrammar& grammar = params.grammars()[i];
    msg_params.grammars.push_back(
        content::SpeechRecognitionGrammar(grammar.src().spec(),
                                          grammar.weight()));
  }
  msg_params.language = UTF16ToUTF8(params.language());
  msg_params.is_one_shot = !params.continuous();
  msg_params.origin_url = "";  // TODO(primiano) we need an origin from WebKit.
  msg_params.render_view_id = routing_id();
  msg_params.js_handle_id = GetIDForHandle(handle);
  Send(new SpeechRecognitionHostMsg_StartRequest(msg_params));
}

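// Asks the browser process to stop audio capture for the session identified
// by |handle|.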
void SpeechRecognitionDispatcher::stop(
    const WebSpeechRecognitionHandle& handle,
    WebSpeechRecognizerClient* recognizer_client) {
  DCHECK(recognizer_client_ == recognizer_client);
  Send(new SpeechRecognitionHostMsg_StopCaptureRequest(routing_id(),
                                                       GetIDForHandle(handle)));
}

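// Asks the browser process to abort the session identified by |handle|.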
void SpeechRecognitionDispatcher::abort(
    const WebSpeechRecognitionHandle& handle,
    WebSpeechRecognizerClient* recognizer_client) {
  Send(new SpeechRecognitionHostMsg_AbortRequest(routing_id(),
                                                 GetIDForHandle(handle)));
}

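// The handlers below relay browser-side recognition events back to WebKit,
// translating the integer |js_handle_id| back into its WebKit handle.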
void SpeechRecognitionDispatcher::OnRecognitionStarted(int js_handle_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didStart(GetHandleFromID(js_handle_id));
}

void SpeechRecognitionDispatcher::OnAudioStarted(int js_handle_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didStartAudio(GetHandleFromID(js_handle_id));
}

void SpeechRecognitionDispatcher::OnSoundStarted(int js_handle_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didStartSound(GetHandleFromID(js_handle_id));
}

void SpeechRecognitionDispatcher::OnSoundEnded(int js_handle_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didEndSound(GetHandleFromID(js_handle_id));
}

void SpeechRecognitionDispatcher::OnAudioEnded(int js_handle_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didEndAudio(GetHandleFromID(js_handle_id));
}

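// Translates a content::SpeechRecognitionError into the WebKit error API: a
// no-match error is reported via didReceiveNoMatch(), any other code is
// mapped to the corresponding WebSpeechRecognizerClient::ErrorCode and
// reported via didReceiveError().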
void SpeechRecognitionDispatcher::OnErrorOccurred(
    int js_handle_id, const SpeechRecognitionError& error) {
  DCHECK(recognizer_client_);
  if (error.code == content::SPEECH_RECOGNITION_ERROR_NO_MATCH) {
    recognizer_client_->didReceiveNoMatch(GetHandleFromID(js_handle_id),
                                          WebSpeechRecognitionResult());
  } else {
    // TODO(primiano) speech_recognition_error.h must be updated with the new
    // API specs soon.
    WebSpeechRecognizerClient::ErrorCode wk_error_code;
    switch (error.code) {
      case content::SPEECH_RECOGNITION_ERROR_ABORTED:
        wk_error_code = WebSpeechRecognizerClient::AbortedError;
        break;
      case content::SPEECH_RECOGNITION_ERROR_AUDIO:
        wk_error_code = WebSpeechRecognizerClient::AudioCaptureError;
        break;
      case content::SPEECH_RECOGNITION_ERROR_NETWORK:
        wk_error_code = WebSpeechRecognizerClient::NetworkError;
        break;
      case content::SPEECH_RECOGNITION_ERROR_NO_SPEECH:
        wk_error_code = WebSpeechRecognizerClient::NoSpeechError;
        break;
      case content::SPEECH_RECOGNITION_ERROR_BAD_GRAMMAR:
        wk_error_code = WebSpeechRecognizerClient::BadGrammarError;
        break;
      default:
        NOTREACHED();
        wk_error_code = WebSpeechRecognizerClient::OtherError;
    }
    recognizer_client_->didReceiveError(GetHandleFromID(js_handle_id),
                                        WebString(),  // TODO(primiano) message?
                                        wk_error_code);
  }
}

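// Reports the end of the session to WebKit and drops its handle <-> ID
// mapping.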
void SpeechRecognitionDispatcher::OnRecognitionEnded(int js_handle_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didEnd(GetHandleFromID(js_handle_id));
  handle_map_.erase(js_handle_id);
}

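// Converts the recognition hypotheses into parallel vectors of transcripts
// and confidences and delivers them to WebKit as a single
// WebSpeechRecognitionResult, marked final unless the result is provisional.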
void SpeechRecognitionDispatcher::OnResultRetrieved(
    int js_handle_id, const SpeechRecognitionResult& result) {
  DCHECK(recognizer_client_);

  const size_t num_hypotheses = result.hypotheses.size();
  WebSpeechRecognitionResult webkit_result;
  WebVector<WebString> transcripts(num_hypotheses);
  WebVector<float> confidences(num_hypotheses);
  for (size_t i = 0; i < num_hypotheses; ++i) {
    transcripts[i] = result.hypotheses[i].utterance;
    confidences[i] = static_cast<float>(result.hypotheses[i].confidence);
  }
  webkit_result.assign(transcripts, confidences, !result.provisional);
Satish (2012/05/20 22:57:46):
Not related to this CL, but passing the transcript
  // TODO(primiano) Handle history, currently empty.
  WebVector<WebSpeechRecognitionResult> empty_history;
  recognizer_client_->didReceiveResult(GetHandleFromID(js_handle_id),
                                       webkit_result,
                                       0,  // result_index
                                       empty_history);
}

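// Returns the integer ID associated with |handle|, creating a new mapping
// with the next available ID if none exists yet. This ID is what identifies
// the session in IPC messages.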
int SpeechRecognitionDispatcher::GetIDForHandle(
    const WebSpeechRecognitionHandle& handle) {
  // Search first for an existing mapping.
  for (HandleMap::iterator iter = handle_map_.begin();
       iter != handle_map_.end();
       ++iter) {
    if (iter->second.equals(handle))
      return iter->first;
  }
  // If no existing mapping found, create a new one.
  ++last_mapping_id_;
  handle_map_[last_mapping_id_] = handle;
  return last_mapping_id_;
}

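// Returns the WebKit handle previously associated with |js_handle_id| by
// GetIDForHandle(); the mapping is expected to exist.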
const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID(
    int js_handle_id) {
  HandleMap::iterator iter = handle_map_.find(js_handle_id);
  DCHECK(iter != handle_map_.end());
  return iter->second;
}