Chromium Code Reviews

Side by Side Diff: media/audio/mac/audio_synchronized_mac.cc

Issue 10909185: Add Mac OS X synchronized audio I/O back-end (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Created 8 years, 3 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "media/audio/mac/audio_synchronized_mac.h"
6
7 #include <CoreServices/CoreServices.h>
8 #include <algorithm>
9
10 #include "base/basictypes.h"
11 #include "base/debug/trace_event.h"
12 #include "base/logging.h"
13 #include "base/mac/mac_logging.h"
14 #include "media/audio/audio_util.h"
15 #include "media/audio/mac/audio_manager_mac.h"
16
17 namespace media {
18
19 static const int kHardwareBufferSize = 128;
20 static const int kFifoSize = 16384;
21
22 // TODO(crogers): handle the non-stereo case.
23 static const int kChannels = 2;
24
25 // This value was determined empirically for minimum latency while still
26 // guarding against FIFO under-runs.
27 static const int kBaseTargetFifoFrames = 256 + 64;
28
29 // If the input and output sample-rates don't match, then we need to maintain
30 // an additional safety margin due to the callback timing jitter and the
31 // varispeed buffering. This value was empirically tuned.
32 static const int kAdditionalTargetFifoFrames = 128;
33
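
[Editor's note] For scale, a back-of-the-envelope sketch of what the FIFO targets above amount to in wall-clock time, assuming a hypothetical 48 kHz hardware rate (the rate is an assumption for illustration, not part of the patch):

// Editor's sketch, not part of the patch: FIFO targets expressed in milliseconds.
#include <cstdio>

int main() {
  const double sample_rate = 48000.0;  // assumed hardware rate
  const int base_frames = 256 + 64;    // kBaseTargetFifoFrames
  const int extra_frames = 128;        // kAdditionalTargetFifoFrames
  // ~6.7 ms of buffering when input and output rates match...
  std::printf("matched rates:    %.1f ms\n", 1000.0 * base_frames / sample_rate);
  // ...and ~9.3 ms with the extra safety margin when they differ.
  std::printf("mismatched rates: %.1f ms\n",
              1000.0 * (base_frames + extra_frames) / sample_rate);
  return 0;
}
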
34 static void ZeroBufferList(AudioBufferList* buffer_list) {
35 for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i)
36 memset(buffer_list->mBuffers[i].mData,
37 0,
38 buffer_list->mBuffers[i].mDataByteSize);
39 }
40
41 static void WrapBufferList(AudioBufferList* buffer_list,
42 AudioBus* bus,
43 int frames) {
44 DCHECK(buffer_list);
45 DCHECK(bus);
46 int channels = bus->channels();
47 int buffer_list_channels = buffer_list->mNumberBuffers;
48
49 // Copy pointers from AudioBufferList.
50 // It's ok to pass in a |buffer_list| with fewer channels, in which
scherkus (not reviewing) 2012/09/17 23:07:17 update comment? I'm not seeing us pass stuff in
Chris Rogers 2012/09/18 00:22:36 I've moved part of this comment down to line 57 wh
51 // case we just duplicate the last channel.
52 int source_idx = 0;
53 for (int i = 0; i < channels; ++i) {
54 bus->SetChannelData(
55 i, static_cast<float*>(buffer_list->mBuffers[source_idx].mData));
56
57 if (source_idx < buffer_list_channels - 1)
58 ++source_idx;
59 }
60
61 // Finally set the actual length.
62 bus->set_frames(frames);
63 }
64
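
[Editor's note] A standalone sketch of the channel mapping WrapBufferList() performs when the AudioBufferList has fewer channels than the destination bus. The mono-to-stereo case and the plain printout are illustrative only; no CoreAudio or Chromium types are used:

// Editor's sketch, not part of the patch: the index walk above duplicates the
// last source channel once the AudioBufferList runs out of buffers.
#include <cstdio>

int main() {
  const int bus_channels = 2;    // destination AudioBus (stereo)
  const int list_channels = 1;   // source AudioBufferList (mono)
  int source_idx = 0;
  for (int i = 0; i < bus_channels; ++i) {
    // Same mapping as WrapBufferList(): bus channel i reads mBuffers[source_idx].
    std::printf("bus channel %d <- mBuffers[%d]\n", i, source_idx);
    if (source_idx < list_channels - 1)
      ++source_idx;
  }
  // With a mono source, both output channels alias mBuffers[0].
  return 0;
}
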
65 AudioSynchronizedStream::AudioSynchronizedStream(
66 AudioManagerMac* manager,
67 const AudioParameters& params,
68 AudioDeviceID input_id,
69 AudioDeviceID output_id)
70 : manager_(manager),
71 params_(params),
72 input_sample_rate_(0),
73 output_sample_rate_(0),
74 input_id_(input_id),
75 output_id_(output_id),
76 input_buffer_list_(NULL),
77 fifo_(kChannels, kFifoSize),
78 target_fifo_frames_(kBaseTargetFifoFrames),
79 average_delta_(0.0),
80 fifo_rate_compensation_(1.0),
81 input_unit_(0),
82 varispeed_unit_(0),
83 output_unit_(0),
84 first_input_time_(-1),
85 is_running_(false),
86 hardware_buffer_size_(kHardwareBufferSize),
87 channels_(kChannels) {
88 }
89
90 AudioSynchronizedStream::~AudioSynchronizedStream() {
91 DCHECK(!input_unit_);
92 DCHECK(!output_unit_);
93 DCHECK(!varispeed_unit_);
94 }
95
96 bool AudioSynchronizedStream::Open() {
97 if (params_.channels() != kChannels) {
98 LOG(ERROR) << "Only stereo output is currently supported.";
99 return false;
100 }
101
102 // Create the input, output, and varispeed AudioUnits.
103 OSStatus result = CreateAudioUnits();
104 if (result != noErr) {
105 LOG(ERROR) << "Cannot create AudioUnits.";
106 return false;
107 }
108
109 result = SetupInput(input_id_);
110 if (result != noErr) {
111 LOG(ERROR) << "Error configuring input AudioUnit.";
112 return false;
113 }
114
115 result = SetupOutput(output_id_);
116 if (result != noErr) {
117 LOG(ERROR) << "Error configuring output AudioUnit.";
118 return false;
119 }
120
121 result = SetupCallbacks();
122 if (result != noErr) {
123 LOG(ERROR) << "Error setting up callbacks on AudioUnits.";
124 return false;
125 }
126
127 result = SetupStreamFormats();
128 if (result != noErr) {
129 LOG(ERROR) << "Error configuring stream formats on AudioUnits.";
130 return false;
131 }
132
133 AllocateInputData();
134
135 // Final initialization of the AudioUnits.
136 result = AudioUnitInitialize(input_unit_);
137 if (result != noErr) {
138 LOG(ERROR) << "Error initializing input AudioUnit.";
139 return false;
140 }
141
142 result = AudioUnitInitialize(output_unit_);
143 if (result != noErr) {
144 LOG(ERROR) << "Error initializing output AudioUnit.";
145 return false;
146 }
147
148 result = AudioUnitInitialize(varispeed_unit_);
149 if (result != noErr) {
150 LOG(ERROR) << "Error initializing varispeed AudioUnit.";
151 return false;
152 }
153
154 if (input_sample_rate_ != output_sample_rate_) {
155 // Add extra safety margin.
156 target_fifo_frames_ += kAdditionalTargetFifoFrames;
157 }
158
159 // Buffer initial silence corresponding to target I/O buffering.
160 fifo_.Clear();
161 scoped_ptr<AudioBus> silence =
162 AudioBus::Create(channels_, target_fifo_frames_);
163 silence->Zero();
164 fifo_.Push(silence.get());
165
166 return true;
167 }
168
169 void AudioSynchronizedStream::Close() {
170 DCHECK(!is_running_);
171
172 if (input_buffer_list_) {
173 free(input_buffer_list_);
174 input_buffer_list_ = 0;
175 input_bus_.reset(NULL);
176 wrapper_bus_.reset(NULL);
177 }
178
179 if (input_unit_) {
180 AudioUnitUninitialize(input_unit_);
181 CloseComponent(input_unit_);
182 }
183
184 if (output_unit_) {
185 AudioUnitUninitialize(output_unit_);
186 CloseComponent(output_unit_);
187 }
188
189 if (varispeed_unit_) {
190 AudioUnitUninitialize(varispeed_unit_);
191 CloseComponent(varispeed_unit_);
192 }
193
194 input_unit_ = NULL;
195 output_unit_ = NULL;
196 varispeed_unit_ = NULL;
197
198 // Inform the audio manager that we have been closed. This can cause our
199 // destruction.
200 manager_->ReleaseOutputStream(this);
201 }
202
203 void AudioSynchronizedStream::Start(AudioSourceCallback* callback) {
204 DCHECK(callback);
205 DCHECK(input_unit_);
206 DCHECK(output_unit_);
207 DCHECK(varispeed_unit_);
208
209 if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_)
210 return;
211
212 source_ = callback;
213
214 // Reset state variables each time we Start().
215 fifo_rate_compensation_ = 1.0;
216 average_delta_ = 0.0;
217
218 OSStatus result = noErr;
219
220 if (!is_running_) {
221 first_input_time_ = -1;
222
223 result = AudioOutputUnitStart(input_unit_);
224 OSSTATUS_DCHECK(result == noErr, result);
225
226 if (result == noErr) {
227 result = AudioOutputUnitStart(output_unit_);
228 OSSTATUS_DCHECK(result == noErr, result);
229 }
230 }
231
232 is_running_ = true;
233 }
234
235 void AudioSynchronizedStream::Stop() {
236 OSStatus result = noErr;
237 if (is_running_) {
238 result = AudioOutputUnitStop(input_unit_);
239 OSSTATUS_DCHECK(result == noErr, result);
240
241 if (result == noErr) {
242 result = AudioOutputUnitStop(output_unit_);
243 OSSTATUS_DCHECK(result == noErr, result);
244 }
245 }
246
247 if (result == noErr)
248 is_running_ = false;
249 }
250
251 bool AudioSynchronizedStream::IsRunning() {
252 return is_running_;
253 }
254
255 // TODO(crogers): implement - or remove from AudioOutputStream.
256 void AudioSynchronizedStream::SetVolume(double volume) {}
257 void AudioSynchronizedStream::GetVolume(double* volume) {}
258
259 OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent(
260 AudioDeviceID output_id) {
261 OSStatus result = noErr;
262
263 // Get the default output device if device is unknown.
264 if (output_id == kAudioDeviceUnknown) {
265 AudioObjectPropertyAddress pa;
266 pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
267 pa.mScope = kAudioObjectPropertyScopeGlobal;
268 pa.mElement = kAudioObjectPropertyElementMaster;
269 UInt32 size = sizeof(output_id);
270
271 result = AudioObjectGetPropertyData(
272 kAudioObjectSystemObject,
273 &pa,
274 0,
275 0,
276 &size,
277 &output_id);
278
279 OSSTATUS_DCHECK(result == noErr, result);
280 if (result != noErr)
281 return result;
282 }
283
284 // Set the render frame size.
285 UInt32 frame_size = hardware_buffer_size_;
286 AudioObjectPropertyAddress pa;
287 pa.mSelector = kAudioDevicePropertyBufferFrameSize;
288 pa.mScope = kAudioDevicePropertyScopeInput;
289 pa.mElement = kAudioObjectPropertyElementMaster;
290 result = AudioObjectSetPropertyData(
291 output_id,
292 &pa,
293 0,
294 0,
295 sizeof(frame_size),
296 &frame_size);
297
298 OSSTATUS_DCHECK(result == noErr, result);
299 if (result != noErr)
300 return result;
301
302 output_info_.Initialize(output_id, false);
303
304 // Set the Current Device to the Default Output Unit.
305 result = AudioUnitSetProperty(
306 output_unit_,
307 kAudioOutputUnitProperty_CurrentDevice,
308 kAudioUnitScope_Global,
309 0,
310 &output_info_.id_,
311 sizeof(output_info_.id_));
312
313 OSSTATUS_DCHECK(result == noErr, result);
314 return result;
315 }
316
317 OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent(
318 AudioDeviceID input_id) {
319 OSStatus result = noErr;
320
321 // Get the default input device if device is unknown.
322 if (input_id == kAudioDeviceUnknown) {
323 AudioObjectPropertyAddress pa;
324 pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
325 pa.mScope = kAudioObjectPropertyScopeGlobal;
326 pa.mElement = kAudioObjectPropertyElementMaster;
327 UInt32 size = sizeof(input_id);
328
329 result = AudioObjectGetPropertyData(
330 kAudioObjectSystemObject,
331 &pa,
332 0,
333 0,
334 &size,
335 &input_id);
336
337 OSSTATUS_DCHECK(result == noErr, result);
338 if (result != noErr)
339 return result;
340 }
341
342 // Set the render frame size.
343 UInt32 frame_size = hardware_buffer_size_;
344 AudioObjectPropertyAddress pa;
345 pa.mSelector = kAudioDevicePropertyBufferFrameSize;
346 pa.mScope = kAudioDevicePropertyScopeInput;
347 pa.mElement = kAudioObjectPropertyElementMaster;
348 result = AudioObjectSetPropertyData(
349 input_id,
350 &pa,
351 0,
352 0,
353 sizeof(frame_size),
354 &frame_size);
355
356 OSSTATUS_DCHECK(result == noErr, result);
357 if (result != noErr)
358 return result;
359
360 input_info_.Initialize(input_id, true);
361
362 // Set the Current Device to the AUHAL.
363 // This should be done only after I/O has been enabled on the AUHAL.
364 result = AudioUnitSetProperty(
365 input_unit_,
366 kAudioOutputUnitProperty_CurrentDevice,
367 kAudioUnitScope_Global,
368 0,
369 &input_info_.id_,
370 sizeof(input_info_.id_));
371
372 OSSTATUS_DCHECK(result == noErr, result);
373 return result;
374 }
375
376 OSStatus AudioSynchronizedStream::CreateAudioUnits() {
377 // Q: Why do we need a varispeed unit?
378 // A: If the input device and the output device are running at
379 // different sample rates and/or on different clocks, we will need
380 // to compensate to avoid a pitch change and
381 // to avoid buffer under and over runs.
382 ComponentDescription varispeed_desc;
383 varispeed_desc.componentType = kAudioUnitType_FormatConverter;
384 varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed;
385 varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
386 varispeed_desc.componentFlags = 0;
387 varispeed_desc.componentFlagsMask = 0;
388
389 Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc);
390 if (varispeed_comp == NULL)
391 return -1;
392
393 OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_);
394 OSSTATUS_DCHECK(result == noErr, result);
395 if (result != noErr)
396 return result;
397
398 // Open input AudioUnit.
399 ComponentDescription input_desc;
400 input_desc.componentType = kAudioUnitType_Output;
401 input_desc.componentSubType = kAudioUnitSubType_HALOutput;
402 input_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
403 input_desc.componentFlags = 0;
404 input_desc.componentFlagsMask = 0;
405
406 Component input_comp = FindNextComponent(NULL, &input_desc);
407 if (input_comp == NULL)
408 return -1;
409
410 result = OpenAComponent(input_comp, &input_unit_);
411 OSSTATUS_DCHECK(result == noErr, result);
412 if (result != noErr)
413 return result;
414
415 // Open output AudioUnit.
416 ComponentDescription output_desc;
417 output_desc.componentType = kAudioUnitType_Output;
418 output_desc.componentSubType = kAudioUnitSubType_DefaultOutput;
419 output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
420 output_desc.componentFlags = 0;
421 output_desc.componentFlagsMask = 0;
422
423 Component output_comp = FindNextComponent(NULL, &output_desc);
424 if (output_comp == NULL)
425 return -1;
426
427 result = OpenAComponent(output_comp, &output_unit_);
428 OSSTATUS_DCHECK(result == noErr, result);
429 if (result != noErr)
430 return result;
431
432 return noErr;
433 }
434
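
[Editor's note] To put rough numbers on the Q/A at the top of CreateAudioUnits(): even nominally identical sample rates drift, and without rate compensation the mismatch accumulates in the FIFO until it under- or over-runs. The clock skew below is hypothetical, chosen only to show the order of magnitude:

// Editor's sketch (made-up clock skew, not measured data).
#include <cstdio>

int main() {
  const double input_rate = 48000.0;   // input device clock (assumed)
  const double output_rate = 48004.8;  // output clock 0.01% fast (assumed)
  const double drift_frames_per_second = output_rate - input_rate;  // 4.8

  // kBaseTargetFifoFrames worth of slack (256 + 64 frames) is consumed in:
  const double seconds_to_underrun = (256 + 64) / drift_frames_per_second;
  std::printf("FIFO drains in ~%.0f seconds without rate compensation\n",
              seconds_to_underrun);  // ~67 seconds
  return 0;
}
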
435 OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) {
436 // The AUHAL used for input needs to be initialized
437 // before anything is done to it.
438 OSStatus result = AudioUnitInitialize(input_unit_);
439 OSSTATUS_DCHECK(result == noErr, result);
440 if (result != noErr)
441 return result;
442
443 // We must enable the Audio Unit (AUHAL) for input and disable output
444 // BEFORE setting the AUHAL's current device.
445 result = EnableIO();
446 OSSTATUS_DCHECK(result == noErr, result);
447 if (result != noErr)
448 return result;
449
450 result = SetInputDeviceAsCurrent(input_id);
451 OSSTATUS_DCHECK(result == noErr, result);
452
453 return result;
454 }
455
456 OSStatus AudioSynchronizedStream::EnableIO() {
457 // Enable input on the AUHAL.
458 UInt32 enable_io = 1;
459 OSStatus result = AudioUnitSetProperty(
460 input_unit_,
461 kAudioOutputUnitProperty_EnableIO,
462 kAudioUnitScope_Input,
463 1, // input element
464 &enable_io,
465 sizeof(enable_io));
466
467 OSSTATUS_DCHECK(result == noErr, result);
468 if (result != noErr)
469 return result;
470
471 // Disable Output on the AUHAL.
472 enable_io = 0;
473 result = AudioUnitSetProperty(
474 input_unit_,
475 kAudioOutputUnitProperty_EnableIO,
476 kAudioUnitScope_Output,
477 0, // output element
478 &enable_io,
479 sizeof(enable_io));
480
481 OSSTATUS_DCHECK(result == noErr, result);
482 return result;
483 }
484
485 OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) {
486 OSStatus result = noErr;
487
488 result = SetOutputDeviceAsCurrent(output_id);
489 OSSTATUS_DCHECK(result == noErr, result);
490 if (result != noErr)
491 return result;
492
493 // Tell the output unit not to reset timestamps.
494 // Otherwise sample rate changes will cause sync loss.
495 UInt32 start_at_zero = 0;
496 result = AudioUnitSetProperty(
497 output_unit_,
498 kAudioOutputUnitProperty_StartTimestampsAtZero,
499 kAudioUnitScope_Global,
500 0,
501 &start_at_zero,
502 sizeof(start_at_zero));
503
504 OSSTATUS_DCHECK(result == noErr, result);
505
506 return result;
507 }
508
509 OSStatus AudioSynchronizedStream::SetupCallbacks() {
510 // Set the input callback.
511 AURenderCallbackStruct callback;
512 callback.inputProc = InputProc;
513 callback.inputProcRefCon = this;
514 OSStatus result = AudioUnitSetProperty(
515 input_unit_,
516 kAudioOutputUnitProperty_SetInputCallback,
517 kAudioUnitScope_Global,
518 0,
519 &callback,
520 sizeof(callback));
521
522 OSSTATUS_DCHECK(result == noErr, result);
523 if (result != noErr)
524 return result;
525
526 // Set the output callback.
527 callback.inputProc = OutputProc;
528 callback.inputProcRefCon = this;
529 result = AudioUnitSetProperty(
530 output_unit_,
531 kAudioUnitProperty_SetRenderCallback,
532 kAudioUnitScope_Input,
533 0,
534 &callback,
535 sizeof(callback));
536
537 OSSTATUS_DCHECK(result == noErr, result);
538 if (result != noErr)
539 return result;
540
541 // Set the varispeed callback.
542 callback.inputProc = VarispeedProc;
543 callback.inputProcRefCon = this;
544 result = AudioUnitSetProperty(
545 varispeed_unit_,
546 kAudioUnitProperty_SetRenderCallback,
547 kAudioUnitScope_Input,
548 0,
549 &callback,
550 sizeof(callback));
551
552 OSSTATUS_DCHECK(result == noErr, result);
553
554 return result;
555 }
556
557 OSStatus AudioSynchronizedStream::SetupStreamFormats() {
558 AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out;
559
560 // Get the stream format of the input device (input scope of the input AUHAL).
561 UInt32 property_size = sizeof(asbd_dev1_in);
562 OSStatus result = AudioUnitGetProperty(
563 input_unit_,
564 kAudioUnitProperty_StreamFormat,
565 kAudioUnitScope_Input,
566 1,
567 &asbd_dev1_in,
568 &property_size);
569
570 OSSTATUS_DCHECK(result == noErr, result);
571 if (result != noErr)
572 return result;
573
574 // Get the stream format on the client side of the input AUHAL (output scope).
575 property_size = sizeof(asbd);
576 result = AudioUnitGetProperty(
577 input_unit_,
578 kAudioUnitProperty_StreamFormat,
579 kAudioUnitScope_Output,
580 1,
581 &asbd,
582 &property_size);
583
584 OSSTATUS_DCHECK(result == noErr, result);
585 if (result != noErr)
586 return result;
587
588 // Get the stream format of the output device (output scope of the output unit).
589 property_size = sizeof(asbd_dev2_out);
590 result = AudioUnitGetProperty(
591 output_unit_,
592 kAudioUnitProperty_StreamFormat,
593 kAudioUnitScope_Output,
594 0,
595 &asbd_dev2_out,
596 &property_size);
597
598 OSSTATUS_DCHECK(result == noErr, result);
599 if (result != noErr)
600 return result;
601
602 // Set the format of all the AUs to the input/output devices' channel count.
603 // For this simple case, we use the lower of the input device's and the
604 // output device's channel counts.
605 asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame,
606 asbd_dev2_out.mChannelsPerFrame);
607
608 // We must get the sample rate of the input device and set it to the
609 // stream format of AUHAL.
610 Float64 rate = 0;
611 property_size = sizeof(rate);
612
613 AudioObjectPropertyAddress pa;
614 pa.mSelector = kAudioDevicePropertyNominalSampleRate;
615 pa.mScope = kAudioObjectPropertyScopeWildcard;
616 pa.mElement = kAudioObjectPropertyElementMaster;
617 result = AudioObjectGetPropertyData(
618 input_info_.id_,
619 &pa,
620 0,
621 0,
622 &property_size,
623 &rate);
624
625 OSSTATUS_DCHECK(result == noErr, result);
626 if (result != noErr)
627 return result;
628
629 input_sample_rate_ = rate;
630
631 asbd.mSampleRate = rate;
632 property_size = sizeof(asbd);
633
634 // Set the new formats to the AUs...
635 result = AudioUnitSetProperty(
636 input_unit_,
637 kAudioUnitProperty_StreamFormat,
638 kAudioUnitScope_Output,
639 1,
640 &asbd,
641 property_size);
642
643 OSSTATUS_DCHECK(result == noErr, result);
644 if (result != noErr)
645 return result;
646
647 result = AudioUnitSetProperty(
648 varispeed_unit_,
649 kAudioUnitProperty_StreamFormat,
650 kAudioUnitScope_Input,
651 0,
652 &asbd,
653 property_size);
654
655 OSSTATUS_DCHECK(result == noErr, result);
656 if (result != noErr)
657 return result;
658
659 // Set the correct sample rate for the output device,
660 // but keep the channel count the same.
661 property_size = sizeof(rate);
662
663 pa.mSelector = kAudioDevicePropertyNominalSampleRate;
664 pa.mScope = kAudioObjectPropertyScopeWildcard;
665 pa.mElement = kAudioObjectPropertyElementMaster;
666 result = AudioObjectGetPropertyData(
667 output_info_.id_,
668 &pa,
669 0,
670 0,
671 &property_size,
672 &rate);
673
674 OSSTATUS_DCHECK(result == noErr, result);
675 if (result != noErr)
676 return result;
677
678 output_sample_rate_ = rate;
679
680 // The requested sample-rate must match the hardware sample-rate.
681 if (output_sample_rate_ != params_.sample_rate()) {
682 LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
683 << " must match the hardware sample-rate: " << output_sample_rate_;
684 return kAudioDeviceUnsupportedFormatError;
685 }
686
687 asbd.mSampleRate = rate;
688 property_size = sizeof(asbd);
689
690 // Set the new audio stream formats for the rest of the AUs...
691 result = AudioUnitSetProperty(
692 varispeed_unit_,
693 kAudioUnitProperty_StreamFormat,
694 kAudioUnitScope_Output,
695 0,
696 &asbd,
697 property_size);
698
699 OSSTATUS_DCHECK(result == noErr, result);
700 if (result != noErr)
701 return result;
702
703 result = AudioUnitSetProperty(
704 output_unit_,
705 kAudioUnitProperty_StreamFormat,
706 kAudioUnitScope_Input,
707 0,
708 &asbd,
709 property_size);
710
711 OSSTATUS_DCHECK(result == noErr, result);
712 return result;
713 }
714
715 void AudioSynchronizedStream::AllocateInputData() {
716 // Allocate storage for the AudioBufferList used for the
717 // input data from the input AudioUnit.
718 // We allocate enough space for one AudioBuffer per channel.
719 size_t malloc_size = offsetof(AudioBufferList, mBuffers[0]) +
720 (sizeof(AudioBuffer) * channels_);
721
722 input_buffer_list_ = static_cast<AudioBufferList*>(malloc(malloc_size));
723 input_buffer_list_->mNumberBuffers = channels_;
724
725 input_bus_ = AudioBus::Create(channels_, hardware_buffer_size_);
726 wrapper_bus_ = AudioBus::CreateWrapper(channels_);
727
728 // Allocate buffers for AudioBufferList.
729 UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
730 for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
731 input_buffer_list_->mBuffers[i].mNumberChannels = 1;
732 input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
733 input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
734 }
735 }
736
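
[Editor's note] The malloc size in AllocateInputData() follows the usual CoreAudio pattern of treating AudioBufferList as a variable-length struct: a header plus one trailing AudioBuffer per channel. A minimal standalone sketch with mock types (the Mock* names are stand-ins, not CoreAudio or Chromium code):

// Editor's sketch: header + N trailing buffer entries in one allocation.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct MockAudioBuffer {
  unsigned mNumberChannels;
  unsigned mDataByteSize;
  void* mData;
};

struct MockAudioBufferList {
  unsigned mNumberBuffers;
  MockAudioBuffer mBuffers[1];  // variable-length tail, as in CoreAudio
};

int main() {
  const int channels = 2;
  const size_t malloc_size = offsetof(MockAudioBufferList, mBuffers[0]) +
                             sizeof(MockAudioBuffer) * channels;
  MockAudioBufferList* list =
      static_cast<MockAudioBufferList*>(malloc(malloc_size));
  list->mNumberBuffers = channels;
  std::printf("%zu bytes for %u per-channel buffers\n",
              malloc_size, list->mNumberBuffers);
  free(list);
  return 0;
}
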
737 OSStatus AudioSynchronizedStream::HandleInputCallback(
738 AudioUnitRenderActionFlags* io_action_flags,
739 const AudioTimeStamp* time_stamp,
740 UInt32 bus_number,
741 UInt32 number_of_frames,
742 AudioBufferList* io_data) {
743 TRACE_EVENT0("audio", "AudioSynchronizedStream::HandleInputCallback");
744
745 if (first_input_time_ < 0.0)
746 first_input_time_ = time_stamp->mSampleTime;
747
748 // Get the new audio input data.
749 OSStatus result = AudioUnitRender(
750 input_unit_,
751 io_action_flags,
752 time_stamp,
753 bus_number,
754 number_of_frames,
755 input_buffer_list_);
756
757 OSSTATUS_DCHECK(result == noErr, result);
758 if (result != noErr)
759 return result;
760
761 // Buffer input into FIFO.
762 int available_frames = fifo_.max_frames() - fifo_.frames();
763 if (input_bus_->frames() <= available_frames)
764 fifo_.Push(input_bus_.get());
765
766 return result;
767 }
768
769 OSStatus AudioSynchronizedStream::HandleVarispeedCallback(
770 AudioUnitRenderActionFlags* io_action_flags,
771 const AudioTimeStamp* time_stamp,
772 UInt32 bus_number,
773 UInt32 number_of_frames,
774 AudioBufferList* io_data) {
775 // Create a wrapper bus on the AudioBufferList.
776 WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
777
778 if (fifo_.frames() < static_cast<int>(number_of_frames)) {
779 // We don't DCHECK here, since this is a possible run-time condition
780 // if the machine is bogged down.
781 wrapper_bus_->Zero();
782 return noErr;
783 }
784
785 // Read from the FIFO to feed the varispeed.
786 fifo_.Consume(wrapper_bus_.get(), 0, number_of_frames);
787
788 return noErr;
789 }
790
791 OSStatus AudioSynchronizedStream::HandleOutputCallback(
792 AudioUnitRenderActionFlags* io_action_flags,
793 const AudioTimeStamp* time_stamp,
794 UInt32 bus_number,
795 UInt32 number_of_frames,
796 AudioBufferList* io_data) {
797 if (first_input_time_ < 0.0) {
798 // Input callback hasn't run yet -> silence.
799 ZeroBufferList(io_data);
800 return noErr;
801 }
802
803 // Use the varispeed playback rate to offset small discrepancies
804 // in hardware clocks, and also any differences in sample-rate
805 // between input and output devices.
806
807 // Calculate a varispeed rate scalar factor to compensate for drift between
808 // input and output. We use the actual number of frames still in the FIFO
809 // compared with the ideal value of |target_fifo_frames_|.
810 int delta = fifo_.frames() - target_fifo_frames_;
811
812 // Average |delta| because it can jitter back/forth quite frequently
813 // by +/- the hardware buffer-size *if* the input and output callbacks are
814 // happening at almost exactly the same time. Also, if the input and output
815 // sample-rates are different then |delta| will jitter quite a bit due to
816 // the rate conversion happening in the varispeed, plus the jittering of
817 // the callbacks. The average value is what's important here.
818 average_delta_ += (delta - average_delta_) * 0.1;
819
820 // Compute a rate compensation which always attracts us back to the
821 // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
822 const double kCorrectionTimeSeconds = 0.1;
823 double correction_time_frames = kCorrectionTimeSeconds * output_sample_rate_;
824 fifo_rate_compensation_ =
825 (correction_time_frames + average_delta_) / correction_time_frames;
826
827 // Adjust for FIFO drift.
828 OSStatus result = AudioUnitSetParameter(
829 varispeed_unit_,
830 kVarispeedParam_PlaybackRate,
831 kAudioUnitScope_Global,
832 0,
833 fifo_rate_compensation_,
834 0);
835
836 OSSTATUS_DCHECK(result == noErr, result);
837 if (result != noErr)
838 return result;
839
840 // Render to the output using the varispeed.
841 result = AudioUnitRender(
842 varispeed_unit_,
843 io_action_flags,
844 time_stamp,
845 0,
846 number_of_frames,
847 io_data);
848
849 OSSTATUS_DCHECK(result == noErr, result);
850 if (result != noErr)
851 return result;
852
853 // Create a wrapper bus on the AudioBufferList.
854 WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
855
856 // Process in-place!
857 source_->OnMoreIOData(wrapper_bus_.get(),
858 wrapper_bus_.get(),
859 AudioBuffersState(0, 0));
860
861 return noErr;
862 }
863
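
[Editor's note] To make the compensation in HandleOutputCallback() concrete, a worked example with hypothetical numbers (a 48 kHz output rate and a FIFO running 48 frames over target are assumptions chosen for illustration):

// Editor's sketch: the rate scalar fed to kVarispeedParam_PlaybackRate.
#include <cstdio>

int main() {
  const double kCorrectionTimeSeconds = 0.1;
  const double output_sample_rate = 48000.0;  // assumed hardware rate
  const double average_delta = 48.0;          // FIFO 48 frames above target

  const double correction_time_frames =
      kCorrectionTimeSeconds * output_sample_rate;  // 4800 frames
  const double rate =
      (correction_time_frames + average_delta) / correction_time_frames;

  // Prints 1.010: the varispeed consumes input ~1% faster than real time,
  // draining the surplus back toward target_fifo_frames_ in ~0.1 s.
  std::printf("playback rate = %.3f\n", rate);
  return 0;
}
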
864 OSStatus AudioSynchronizedStream::InputProc(
865 void* user_data,
866 AudioUnitRenderActionFlags* io_action_flags,
867 const AudioTimeStamp* time_stamp,
868 UInt32 bus_number,
869 UInt32 number_of_frames,
870 AudioBufferList* io_data) {
871 AudioSynchronizedStream* stream =
872 static_cast<AudioSynchronizedStream*>(user_data);
873 DCHECK(stream);
874
875 return stream->HandleInputCallback(
876 io_action_flags,
877 time_stamp,
878 bus_number,
879 number_of_frames,
880 io_data);
881 }
882
883 OSStatus AudioSynchronizedStream::VarispeedProc(
884 void* user_data,
885 AudioUnitRenderActionFlags* io_action_flags,
886 const AudioTimeStamp* time_stamp,
887 UInt32 bus_number,
888 UInt32 number_of_frames,
889 AudioBufferList* io_data) {
890 AudioSynchronizedStream* stream =
891 static_cast<AudioSynchronizedStream*>(user_data);
892 DCHECK(stream);
893
894 return stream->HandleVarispeedCallback(
895 io_action_flags,
896 time_stamp,
897 bus_number,
898 number_of_frames,
899 io_data);
900 }
901
902 OSStatus AudioSynchronizedStream::OutputProc(
903 void* user_data,
904 AudioUnitRenderActionFlags* io_action_flags,
905 const AudioTimeStamp* time_stamp,
906 UInt32 bus_number,
907 UInt32 number_of_frames,
908 AudioBufferList* io_data) {
909 AudioSynchronizedStream* stream =
910 static_cast<AudioSynchronizedStream*>(user_data);
911 DCHECK(stream);
912
913 return stream->HandleOutputCallback(
914 io_action_flags,
915 time_stamp,
916 bus_number,
917 number_of_frames,
918 io_data);
919 }
920
921 void AudioSynchronizedStream::AudioDeviceInfo::Initialize(
922 AudioDeviceID id, bool is_input) {
923 id_ = id;
924 is_input_ = is_input;
925 if (id_ == kAudioDeviceUnknown)
926 return;
927
928 UInt32 property_size = sizeof(buffer_size_frames_);
929
930 AudioObjectPropertyAddress pa;
931 pa.mSelector = kAudioDevicePropertyBufferFrameSize;
932 pa.mScope = kAudioObjectPropertyScopeWildcard;
933 pa.mElement = kAudioObjectPropertyElementMaster;
934 OSStatus result = AudioObjectGetPropertyData(
935 id_,
936 &pa,
937 0,
938 0,
939 &property_size,
940 &buffer_size_frames_);
941
942 OSSTATUS_DCHECK(result == noErr, result);
943 }
944
945 } // namespace media