OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2010, Google Inc. All rights reserved. | 2 * Copyright (C) 2010, Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
6 * are met: | 6 * are met: |
7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
(...skipping 71 matching lines...)
82 void PannerNode::pullInputs(size_t framesToProcess) | 82 void PannerNode::pullInputs(size_t framesToProcess) |
83 { | 83 { |
84 // We override pullInputs(), so we can detect new AudioSourceNodes which have connected to us when new connections are made. | 84 // We override pullInputs(), so we can detect new AudioSourceNodes which have connected to us when new connections are made. |
85 // These AudioSourceNodes need to be made aware of our existence in order to handle doppler shift pitch changes. | 85 // These AudioSourceNodes need to be made aware of our existence in order to handle doppler shift pitch changes. |
86 if (m_connectionCount != context()->connectionCount()) { | 86 if (m_connectionCount != context()->connectionCount()) { |
87 m_connectionCount = context()->connectionCount(); | 87 m_connectionCount = context()->connectionCount(); |
88 | 88 |
89 // Recursively go through all nodes connected to us. | 89 // Recursively go through all nodes connected to us. |
90 notifyAudioSourcesConnectedToNode(this); | 90 notifyAudioSourcesConnectedToNode(this); |
91 } | 91 } |
92 | 92 |
93 AudioNode::pullInputs(framesToProcess); | 93 AudioNode::pullInputs(framesToProcess); |
94 } | 94 } |
95 | 95 |
96 void PannerNode::process(size_t framesToProcess) | 96 void PannerNode::process(size_t framesToProcess) |
97 { | 97 { |
98 AudioBus* destination = output(0)->bus(); | 98 AudioBus* destination = output(0)->bus(); |
99 | 99 |
100 if (!isInitialized() || !input(0)->isConnected() || !m_panner.get()) { | 100 if (!isInitialized() || !input(0)->isConnected() || !m_panner.get()) { |
101 destination->zero(); | 101 destination->zero(); |
102 return; | 102 return; |
(...skipping 14 matching lines...)
117 double elevation; | 117 double elevation; |
118 getAzimuthElevation(&azimuth, &elevation); | 118 getAzimuthElevation(&azimuth, &elevation); |
119 m_panner->pan(azimuth, elevation, source, destination, framesToProcess); | 119 m_panner->pan(azimuth, elevation, source, destination, framesToProcess); |
120 | 120 |
121 // Get the distance and cone gain. | 121 // Get the distance and cone gain. |
122 double totalGain = distanceConeGain(); | 122 double totalGain = distanceConeGain(); |
123 | 123 |
124 // Snap to desired gain at the beginning. | 124 // Snap to desired gain at the beginning. |
125 if (m_lastGain == -1.0) | 125 if (m_lastGain == -1.0) |
126 m_lastGain = totalGain; | 126 m_lastGain = totalGain; |
127 | 127 |
128 // Apply gain in-place with de-zippering. | 128 // Apply gain in-place with de-zippering. |
129 destination->copyWithGainFrom(*destination, &m_lastGain, totalGain); | 129 destination->copyWithGainFrom(*destination, &m_lastGain, totalGain); |
130 } else { | 130 } else { |
131 // Too bad - The tryLock() failed. We must be in the middle of changing the panner. | 131 // Too bad - The tryLock() failed. We must be in the middle of changing the panner. |
132 destination->zero(); | 132 destination->zero(); |
133 } | 133 } |
134 } | 134 } |
135 | 135 |
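Note on line 129: copyWithGainFrom() applies the gain with de-zippering, i.e. it ramps m_lastGain toward totalGain across the block instead of jumping, which avoids audible clicks when the source or listener moves. A minimal sketch of that idea, for orientation only; the smoothing constant and loop are assumptions, not the actual AudioBus implementation:

    static void applyGainWithDezippering(float* samples, size_t frameCount, float& lastGain, float targetGain)
    {
        const float kSmoothingConstant = 0.05f; // hypothetical per-frame smoothing amount
        float gain = lastGain;
        for (size_t i = 0; i < frameCount; ++i) {
            gain += (targetGain - gain) * kSmoothingConstant; // exponential approach to the target gain
            samples[i] *= gain;
        }
        lastGain = gain; // carried into the next render quantum, as m_lastGain is here
    }
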
136 void PannerNode::reset() | 136 void PannerNode::reset() |
137 { | 137 { |
138 m_lastGain = -1.0; // force to snap to initial gain | 138 m_lastGain = -1.0; // force to snap to initial gain |
139 if (m_panner.get()) | 139 if (m_panner.get()) |
140 m_panner->reset(); | 140 m_panner->reset(); |
141 } | 141 } |
142 | 142 |
143 void PannerNode::initialize() | 143 void PannerNode::initialize() |
144 { | 144 { |
145 if (isInitialized()) | 145 if (isInitialized()) |
146 return; | 146 return; |
147 | 147 |
148 m_panner = Panner::create(m_panningModel, sampleRate(), context()->hrtfDatabaseLoader()); | 148 m_panner = Panner::create(m_panningModel, sampleRate(), context()->hrtfDatabaseLoader()); |
149 | 149 |
150 AudioNode::initialize(); | 150 AudioNode::initialize(); |
151 } | 151 } |
152 | 152 |
153 void PannerNode::uninitialize() | 153 void PannerNode::uninitialize() |
154 { | 154 { |
155 if (!isInitialized()) | 155 if (!isInitialized()) |
156 return; | 156 return; |
157 | 157 |
158 m_panner.clear(); | 158 m_panner.clear(); |
159 AudioNode::uninitialize(); | 159 AudioNode::uninitialize(); |
160 } | 160 } |
161 | 161 |
162 AudioListener* PannerNode::listener() | 162 AudioListener* PannerNode::listener() |
163 { | 163 { |
164 return context()->listener(); | 164 return context()->listener(); |
165 } | 165 } |
166 | 166 |
167 String PannerNode::panningModel() const | 167 String PannerNode::panningModel() const |
(...skipping 37 matching lines...)
205 m_panningModel = model; | 205 m_panningModel = model; |
206 } | 206 } |
207 break; | 207 break; |
208 case SOUNDFIELD: | 208 case SOUNDFIELD: |
209 // FIXME: Implement sound field model. See // https://bugs.webkit.org/show_bug.cgi?id=77367. | 209 // FIXME: Implement sound field model. See // https://bugs.webkit.org/show_bug.cgi?id=77367. |
210 context()->scriptExecutionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "'soundfield' panning model not implemented."); | 210 context()->scriptExecutionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "'soundfield' panning model not implemented."); |
211 break; | 211 break; |
212 default: | 212 default: |
213 return false; | 213 return false; |
214 } | 214 } |
215 | 215 |
216 return true; | 216 return true; |
217 } | 217 } |
218 | 218 |
219 String PannerNode::distanceModel() const | 219 String PannerNode::distanceModel() const |
220 { | 220 { |
221 switch (const_cast<PannerNode*>(this)->m_distanceEffect.model()) { | 221 switch (const_cast<PannerNode*>(this)->m_distanceEffect.model()) { |
222 case DistanceEffect::ModelLinear: | 222 case DistanceEffect::ModelLinear: |
223 return "linear"; | 223 return "linear"; |
224 case DistanceEffect::ModelInverse: | 224 case DistanceEffect::ModelInverse: |
225 return "inverse"; | 225 return "inverse"; |
(...skipping 130 matching lines...)
356 listenerProjection = min(listenerProjection, scaledSpeedOfSound); | 356 listenerProjection = min(listenerProjection, scaledSpeedOfSound); |
357 sourceProjection = min(sourceProjection, scaledSpeedOfSound); | 357 sourceProjection = min(sourceProjection, scaledSpeedOfSound); |
358 | 358 |
359 dopplerShift = ((speedOfSound - dopplerFactor * listenerProjection) / (speedOfSound - dopplerFactor * sourceProjection)); | 359 dopplerShift = ((speedOfSound - dopplerFactor * listenerProjection) / (speedOfSound - dopplerFactor * sourceProjection)); |
360 fixNANs(dopplerShift); // avoid illegal values | 360 fixNANs(dopplerShift); // avoid illegal values |
361 | 361 |
362 // Limit the pitch shifting to 4 octaves up and 3 octaves down. | 362 // Limit the pitch shifting to 4 octaves up and 3 octaves down. |
363 if (dopplerShift > 16.0) | 363 if (dopplerShift > 16.0) |
364 dopplerShift = 16.0; | 364 dopplerShift = 16.0; |
365 else if (dopplerShift < 0.125) | 365 else if (dopplerShift < 0.125) |
366 dopplerShift = 0.125; | 366 dopplerShift = 0.125; |
367 } | 367 } |
368 } | 368 } |
369 | 369 |
370 return static_cast<float>(dopplerShift); | 370 return static_cast<float>(dopplerShift); |
371 } | 371 } |
372 | 372 |
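For a rough sense of the formula on line 359: with the listener at rest and the source closing in, illustrative numbers give a shift of about 1.11, i.e. roughly 11% higher pitch, comfortably inside the 0.125-16.0 clamp (3 octaves down to 4 octaves up). The sign convention for the projections is assumed here, and the values are not taken from this patch:

    // Illustrative values only; needs <algorithm> for std::min/std::max.
    double speedOfSound = 343.3;      // m/s
    double dopplerFactor = 1.0;
    double listenerProjection = 0.0;  // listener at rest
    double sourceProjection = 34.3;   // source approaching at 34.3 m/s (sign convention assumed)
    double shift = (speedOfSound - dopplerFactor * listenerProjection)
                 / (speedOfSound - dopplerFactor * sourceProjection);   // ~1.11
    shift = std::min(16.0, std::max(0.125, shift)); // 2^4 up, 2^-3 down, matching the clamp above
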
373 float PannerNode::distanceConeGain() | 373 float PannerNode::distanceConeGain() |
374 { | 374 { |
375 FloatPoint3D listenerPosition = listener()->position(); | 375 FloatPoint3D listenerPosition = listener()->position(); |
376 | 376 |
377 double listenerDistance = m_position.distanceTo(listenerPosition); | 377 double listenerDistance = m_position.distanceTo(listenerPosition); |
378 double distanceGain = m_distanceEffect.gain(listenerDistance); | 378 double distanceGain = m_distanceEffect.gain(listenerDistance); |
379 | 379 |
380 m_distanceGain->setValue(static_cast<float>(distanceGain)); | 380 m_distanceGain->setValue(static_cast<float>(distanceGain)); |
381 | 381 |
382 // FIXME: could optimize by caching coneGain | 382 // FIXME: could optimize by caching coneGain |
383 double coneGain = m_coneEffect.gain(m_position, m_orientation, listenerPosition); | 383 double coneGain = m_coneEffect.gain(m_position, m_orientation, listenerPosition); |
384 | 384 |
385 m_coneGain->setValue(static_cast<float>(coneGain)); | 385 m_coneGain->setValue(static_cast<float>(coneGain)); |
386 | 386 |
387 return float(distanceGain * coneGain); | 387 return float(distanceGain * coneGain); |
388 } | 388 } |
389 | 389 |
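The two gains computed above are folded into a single scalar that process() then de-zippers. For intuition about the distance term, the Web Audio spec's default "inverse" distance model attenuates roughly as sketched below; the formula is taken from the spec, not from DistanceEffect itself, which this excerpt does not show:

    // Inverse distance model per the Web Audio spec (assumed here, not shown in this file).
    double inverseDistanceGain(double distance, double refDistance, double rolloffFactor)
    {
        double clamped = std::max(distance, refDistance); // no boost inside the reference distance
        return refDistance / (refDistance + rolloffFactor * (clamped - refDistance));
    }
    // e.g. refDistance = 1, rolloffFactor = 1: gain is 1.0 at 1 unit, 0.5 at 2 units, 0.1 at 10 units.
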
390 void PannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node) | 390 void PannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node) |
391 { | 391 { |
392 ASSERT(node); | 392 ASSERT(node); |
393 if (!node) | 393 if (!node) |
394 return; | 394 return; |
395 | 395 |
396 // First check if this node is an AudioBufferSourceNode. If so, let it know about us so that doppler shift pitch can be taken into account. | 396 // First check if this node is an AudioBufferSourceNode. If so, let it know about us so that doppler shift pitch can be taken into account. |
397 if (node->nodeType() == NodeTypeAudioBufferSource) { | 397 if (node->nodeType() == NodeTypeAudioBufferSource) { |
398 AudioBufferSourceNode* bufferSourceNode = static_cast<AudioBufferSourceNode*>(node); | 398 AudioBufferSourceNode* bufferSourceNode = static_cast<AudioBufferSourceNode*>(node); |
399 bufferSourceNode->setPannerNode(this); | 399 bufferSourceNode->setPannerNode(this); |
400 } else { | 400 } else { |
401 // Go through all inputs to this node. | 401 // Go through all inputs to this node. |
402 for (unsigned i = 0; i < node->numberOfInputs(); ++i) { | 402 for (unsigned i = 0; i < node->numberOfInputs(); ++i) { |
403 AudioNodeInput* input = node->input(i); | 403 AudioNodeInput* input = node->input(i); |
404 | 404 |
405 // For each input, go through all of its connections, looking for AudioBufferSourceNodes. | 405 // For each input, go through all of its connections, looking for AudioBufferSourceNodes. |
406 for (unsigned j = 0; j < input->numberOfRenderingConnections(); ++j) { | 406 for (unsigned j = 0; j < input->numberOfRenderingConnections(); ++j) { |
407 AudioNodeOutput* connectedOutput = input->renderingOutput(j); | 407 AudioNodeOutput* connectedOutput = input->renderingOutput(j); |
408 AudioNode* connectedNode = connectedOutput->node(); | 408 AudioNode* connectedNode = connectedOutput->node(); |
409 notifyAudioSourcesConnectedToNode(connectedNode); // recurse | 409 notifyAudioSourcesConnectedToNode(connectedNode); // recurse |
410 } | 410 } |
411 } | 411 } |
412 } | 412 } |
413 } | 413 } |
414 | 414 |
415 } // namespace WebCore | 415 } // namespace WebCore |
416 | 416 |
417 #endif // ENABLE(WEB_AUDIO) | 417 #endif // ENABLE(WEB_AUDIO) |