/*
 * Copyright (C) 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the

(...skipping 43 matching lines...)

void SpeechSynthesis::voicesDidChange()
{
    m_voiceList.clear();
}

const Vector<RefPtr<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices()
{
    if (m_voiceList.size())
        return m_voiceList;

    // If the voiceList is empty, that's the cue to get the voices from the platform again.
    const Vector<RefPtr<PlatformSpeechSynthesisVoice> >& platformVoices = m_platformSpeechSynthesizer->voiceList();
    size_t voiceCount = platformVoices.size();
    for (size_t k = 0; k < voiceCount; k++)
        m_voiceList.append(SpeechSynthesisVoice::create(platformVoices[k]));

    return m_voiceList;
}
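
voicesDidChange() and getVoices() act as a small invalidate-and-rebuild cache: the change notification only clears m_voiceList, and the next getVoices() call repopulates it from the platform synthesizer. The standalone sketch below shows the same pattern in isolation; PlatformVoiceSource and VoiceCache are illustrative stand-ins, not WebCore types.

    #include <string>
    #include <vector>

    // Illustrative stand-in for the platform voice source (assumption, not WebCore API).
    struct PlatformVoiceSource {
        std::vector<std::string> voiceList() const { return { "Alex", "Victoria" }; }
    };

    class VoiceCache {
    public:
        explicit VoiceCache(PlatformVoiceSource* source) : m_source(source) { }

        // Analogue of voicesDidChange(): drop the cache so the next query re-reads the platform list.
        void voicesDidChange() { m_voices.clear(); }

        // Analogue of getVoices(): rebuild the list only when the cache is empty.
        const std::vector<std::string>& getVoices()
        {
            if (!m_voices.empty())
                return m_voices;
            m_voices = m_source->voiceList();
            return m_voices;
        }

    private:
        PlatformVoiceSource* m_source;
        std::vector<std::string> m_voices;
    };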

bool SpeechSynthesis::speaking() const

(...skipping 20 matching lines...)

    ASSERT(!m_currentSpeechUtterance);
    utterance->setStartTime(monotonicallyIncreasingTime());
    m_currentSpeechUtterance = utterance;
    m_isPaused = false;
    m_platformSpeechSynthesizer->speak(utterance->platformUtterance());
}

void SpeechSynthesis::speak(SpeechSynthesisUtterance* utterance)
{
    m_utteranceQueue.append(utterance);

    // The utterance was just appended; if it is the only entry in the queue, speak it immediately.
    if (m_utteranceQueue.size() == 1)
        startSpeakingImmediately(utterance);
}
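
speak() always appends the utterance to m_utteranceQueue and starts it immediately only when it is the sole entry; anything queued behind it waits until handleSpeakingCompleted() (further down) removes the finished utterance and starts the next one. The following self-contained sketch models that hand-off with plain standard-library types; UtteranceQueue and its members are illustrative, not WebCore API.

    #include <deque>
    #include <iostream>
    #include <string>

    // Minimal model of the speak()/completion hand-off (illustrative, not WebCore API).
    class UtteranceQueue {
    public:
        void speak(const std::string& utterance)
        {
            m_queue.push_back(utterance);
            // Only the sole entry starts immediately; everything queued behind it waits.
            if (m_queue.size() == 1)
                startSpeaking(m_queue.front());
        }

        // Analogue of handleSpeakingCompleted(): remove the finished utterance
        // and start the next one if any is pending.
        void speakingCompleted()
        {
            if (!m_queue.empty())
                m_queue.pop_front();
            if (!m_queue.empty())
                startSpeaking(m_queue.front());
        }

    private:
        void startSpeaking(const std::string& utterance)
        {
            std::cout << "speaking: " << utterance << '\n';
        }

        std::deque<std::string> m_queue;
    };

Calling speak() twice in a row starts only the first utterance; the second starts only after speakingCompleted() is invoked, mirroring the queue behaviour above.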

void SpeechSynthesis::cancel()
{
    // Remove all the items from the utterance queue.
    // Hold on to the current utterance so the platform synthesizer can have a chance to clean up.
    RefPtr<SpeechSynthesisUtterance> current = m_currentSpeechUtterance;
    m_utteranceQueue.clear();
    m_platformSpeechSynthesizer->cancel();
    current = 0;

    // The platform should have called back immediately and cleared the current utterance.
    ASSERT(!m_currentSpeechUtterance);
}
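
cancel() deliberately holds a local RefPtr to the current utterance before clearing the queue, so the object outlives the synchronous callback in which the platform synthesizer clears m_currentSpeechUtterance. The reduced sketch below shows that protect-across-callback pattern with std::shared_ptr standing in for RefPtr; Synthesizer and Utterance here are illustrative names, not WebCore types.

    #include <functional>
    #include <memory>

    struct Utterance { };

    // Reduced model: the platform-level cancel may synchronously run a completion
    // callback that clears the "current" pointer (illustrative, not WebCore API).
    class Synthesizer {
    public:
        std::shared_ptr<Utterance> current;

        void cancel(const std::function<void()>& platformCancel)
        {
            // Keep the utterance alive across the synchronous callback,
            // mirroring the local RefPtr taken in SpeechSynthesis::cancel().
            std::shared_ptr<Utterance> protect = current;
            platformCancel();   // may reset 'current' from inside the callback
            protect.reset();    // drop the protecting reference once the callback is done
        }
    };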

void SpeechSynthesis::pause()
{
    if (!m_isPaused)
        m_platformSpeechSynthesizer->pause();
}

void SpeechSynthesis::resume()
{
    if (!m_currentSpeechUtterance)
        return;
    m_platformSpeechSynthesizer->resume();
}

void SpeechSynthesis::fireEvent(const AtomicString& type, SpeechSynthesisUtterance* utterance, unsigned long charIndex, const String& name)
{
    utterance->dispatchEvent(SpeechSynthesisEvent::create(type, charIndex, (currentTime() - utterance->startTime()), name));
}

void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utterance, bool errorOccurred)
{
    ASSERT(utterance);
    ASSERT(m_currentSpeechUtterance);
    m_currentSpeechUtterance = 0;

    fireEvent(errorOccurred ? eventNames().errorEvent : eventNames().endEvent, utterance, 0, String());

    if (m_utteranceQueue.size()) {
        RefPtr<SpeechSynthesisUtterance> firstUtterance = m_utteranceQueue.first();
        ASSERT(firstUtterance == utterance);
        if (firstUtterance == utterance)
            m_utteranceQueue.removeFirst();

        // Start the next job if there is one pending.
        if (!m_utteranceQueue.isEmpty())
            startSpeakingImmediately(m_utteranceQueue.first().get());
    }
}

void SpeechSynthesis::boundaryEventOccurred(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance, SpeechBoundary boundary, unsigned charIndex)
{
    DEFINE_STATIC_LOCAL(const String, wordBoundaryString, (ASCIILiteral("word")));
    DEFINE_STATIC_LOCAL(const String, sentenceBoundaryString, (ASCIILiteral("sentence")));

    switch (boundary) {
    case SpeechWordBoundary:
        fireEvent(eventNames().boundaryEvent, static_cast<SpeechSynthesisUtterance*>(utterance->client()), charIndex, wordBoundaryString);
        break;
    case SpeechSentenceBoundary:
        fireEvent(eventNames().boundaryEvent, static_cast<SpeechSynthesisUtterance*>(utterance->client()), charIndex, sentenceBoundaryString);
        break;
    default:
        ASSERT_NOT_REACHED();
    }
}

void SpeechSynthesis::didStartSpeaking(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance)
{
    if (utterance->client())
        fireEvent(eventNames().startEvent, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String());
}

void SpeechSynthesis::didPauseSpeaking(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance)
{
    m_isPaused = true;
    if (utterance->client())
        fireEvent(eventNames().pauseEvent, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String());
}

void SpeechSynthesis::didResumeSpeaking(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance)
{
    m_isPaused = false;
    if (utterance->client())
        fireEvent(eventNames().resumeEvent, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String());
}

void SpeechSynthesis::didFinishSpeaking(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance)
{
    if (utterance->client())
        handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), false);
}

void SpeechSynthesis::speakingErrorOccurred(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance)
{
    if (utterance->client())
        handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), true);
}

} // namespace WebCore