Chromium Code Reviews| OLD | NEW | 
|---|---|
| (Empty) | |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "media/audio/mac/audio_unified_mac.h" | |
| 6 | |
| 7 #include <CoreServices/CoreServices.h> | |
| 8 | |
| 9 #include "base/basictypes.h" | |
| 10 #include "base/logging.h" | |
| 11 #include "base/mac/mac_logging.h" | |
| 12 #include "media/audio/audio_util.h" | |
| 13 #include "media/audio/mac/audio_manager_mac.h" | |
| 14 | |
namespace media {

// Number of input channels captured and handed to the client alongside each
// output render request.
// TODO(crogers): support more than hard-coded stereo input.
// Ideally we would like to receive this value as a constructor argument.
int AudioHardwareUnifiedStream::kDefaultInputChannels = 2;
| 20 | |
| 21 AudioHardwareUnifiedStream::AudioHardwareUnifiedStream( | |
| 22 AudioManagerMac* manager, const AudioParameters& params) | |
| 23 : manager_(manager), | |
| 24 source_(NULL), | |
| 25 client_input_channels_(kDefaultInputChannels), | |
| 26 volume_(1.0f), | |
| 27 input_channels_(0), | |
| 28 output_channels_(0), | |
| 29 input_channels_per_frame_(0), | |
| 30 output_channels_per_frame_(0), | |
| 31 io_proc_id_(0), | |
| 32 device_(kAudioObjectUnknown), | |
| 33 is_playing_(false) { | |
| 34 DCHECK(manager_); | |
| 35 | |
| 36 // A frame is one sample across all channels. In interleaved audio the per | |
| 37 // frame fields identify the set of n |channels|. In uncompressed audio, a | |
| 38 // packet is always one frame. | |
| 39 format_.mSampleRate = params.sample_rate(); | |
| 40 format_.mFormatID = kAudioFormatLinearPCM; | |
| 41 format_.mFormatFlags = kLinearPCMFormatFlagIsPacked | | |
| 42 kLinearPCMFormatFlagIsSignedInteger; | |
| 43 format_.mBitsPerChannel = params.bits_per_sample(); | |
| 44 format_.mChannelsPerFrame = params.channels(); | |
| 45 format_.mFramesPerPacket = 1; | |
| 46 format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8; | |
| 47 format_.mBytesPerFrame = format_.mBytesPerPacket; | |
| 48 format_.mReserved = 0; | |
| 49 | |
| 50 // Calculate the number of sample frames per callback. | |
| 51 number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket; | |
| 52 | |
| 53 input_bus_ = AudioBus::Create(client_input_channels_, | |
| 54 params.frames_per_buffer()); | |
| 55 output_bus_ = AudioBus::Create(params); | |
| 56 } | |
| 57 | |
AudioHardwareUnifiedStream::~AudioHardwareUnifiedStream() {
  // Close() must run before destruction; it resets |device_| to
  // kAudioObjectUnknown after destroying the IOProc.
  DCHECK_EQ(device_, kAudioObjectUnknown);
}
| 61 | |
| 62 bool AudioHardwareUnifiedStream::Open() { | |
| 63 // Obtain the current output device selected by the user. | |
| 64 AudioObjectPropertyAddress pa; | |
| 65 pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice; | |
| 66 pa.mScope = kAudioObjectPropertyScopeGlobal; | |
| 67 pa.mElement = kAudioObjectPropertyElementMaster; | |
| 68 | |
| 69 UInt32 size = sizeof(device_); | |
| 70 | |
| 71 OSStatus result = AudioObjectGetPropertyData( | |
| 72 kAudioObjectSystemObject, | |
| 73 &pa, | |
| 74 0, | |
| 75 0, | |
| 76 &size, | |
| 77 &device_); | |
| 78 | |
| 79 if ((result != kAudioHardwareNoError) || (device_ == kAudioDeviceUnknown)) { | |
| 80 LOG(ERROR) << "Cannot open unified AudioDevice."; | |
| 81 return false; | |
| 82 } | |
| 83 | |
| 84 // The requested sample-rate must match the hardware sample-rate. | |
| 85 Float64 sample_rate = 0.0; | |
| 86 size = sizeof(sample_rate); | |
| 87 | |
| 88 pa.mSelector = kAudioDevicePropertyNominalSampleRate; | |
| 89 pa.mScope = kAudioObjectPropertyScopeWildcard; | |
| 90 pa.mElement = kAudioObjectPropertyElementMaster; | |
| 91 | |
| 92 result = AudioObjectGetPropertyData( | |
| 93 device_, | |
| 94 &pa, | |
| 95 0, | |
| 96 0, | |
| 97 &size, | |
| 98 &sample_rate); | |
| 99 | |
| 100 if (result != noErr || sample_rate != format_.mSampleRate) { | |
| 101 LOG(ERROR) << "Requested sample-rate must match the hardware sample-rate."; | |
| 
 
no longer working on chromium
2012/09/11 15:40:28
nit, we can log only one time:
<< "Requested sampl
 
Chris Rogers
2012/09/14 19:17:59
Done.
 
 | |
| 102 LOG(ERROR) << "Requested sample-rate: " << format_.mSampleRate; | |
| 103 LOG(ERROR) << "Hardware sample-rate: " << sample_rate; | |
| 104 return false; | |
| 105 } | |
| 106 | |
| 107 // Configure buffer frame size. | |
| 108 UInt32 frame_size = number_of_frames_; | |
| 109 | |
| 110 pa.mSelector = kAudioDevicePropertyBufferFrameSize; | |
| 111 pa.mScope = kAudioDevicePropertyScopeInput; | |
| 112 pa.mElement = kAudioObjectPropertyElementMaster; | |
| 113 result = AudioObjectSetPropertyData( | |
| 114 device_, | |
| 115 &pa, | |
| 116 0, | |
| 117 0, | |
| 118 sizeof(frame_size), | |
| 119 &frame_size); | |
| 120 | |
| 121 if (result != noErr) { | |
| 122 LOG(ERROR) << "Unable to set input buffer frame size: " << frame_size; | |
| 123 return false; | |
| 124 } | |
| 125 | |
| 126 pa.mScope = kAudioDevicePropertyScopeOutput; | |
| 127 result = AudioObjectSetPropertyData( | |
| 128 device_, | |
| 129 &pa, | |
| 130 0, | |
| 131 0, | |
| 132 sizeof(frame_size), | |
| 133 &frame_size); | |
| 134 | |
| 135 if (result != noErr) { | |
| 136 LOG(ERROR) << "Unable to set output buffer frame size: " << frame_size; | |
| 137 return false; | |
| 138 } | |
| 139 | |
| 140 DVLOG(1) << "Sample rate: " << sample_rate; | |
| 141 DVLOG(1) << "Frame size: " << frame_size; | |
| 142 | |
| 143 // Determine the number of input and output channels. | |
| 144 // We handle both the interleaved and non-interleaved cases. | |
| 145 | |
| 146 // Get input stream configuration. | |
| 147 pa.mSelector = kAudioDevicePropertyStreamConfiguration; | |
| 148 pa.mScope = kAudioDevicePropertyScopeInput; | |
| 149 pa.mElement = kAudioObjectPropertyElementMaster; | |
| 150 | |
| 151 result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size); | |
| 152 OSSTATUS_DCHECK(result == noErr, result); | |
| 153 | |
| 154 if (result == noErr && size > 0) { | |
| 155 // Allocate storage. | |
| 156 scoped_array<uint8> input_list_storage(new uint8[size]); | |
| 157 AudioBufferList& input_list = | |
| 158 *reinterpret_cast<AudioBufferList*>(input_list_storage.get()); | |
| 159 | |
| 160 result = AudioObjectGetPropertyData( | |
| 161 device_, | |
| 162 &pa, | |
| 163 0, | |
| 164 0, | |
| 165 &size, | |
| 166 &input_list); | |
| 167 OSSTATUS_DCHECK(result == noErr, result); | |
| 168 | |
| 169 if (result == noErr) { | |
| 170 // Determine number of input channels. | |
| 171 input_channels_per_frame_ = input_list.mNumberBuffers > 0 ? | |
| 172 input_list.mBuffers[0].mNumberChannels : 0; | |
| 173 if (input_channels_per_frame_ == 1 && input_list.mNumberBuffers > 1) { | |
| 174 // Non-interleaved. | |
| 175 input_channels_ = input_list.mNumberBuffers; | |
| 176 } else { | |
| 177 // Interleaved. | |
| 178 input_channels_ = input_channels_per_frame_; | |
| 179 } | |
| 180 } | |
| 181 } | |
| 182 | |
| 183 DVLOG(1) << "Input channels: " << input_channels_; | |
| 184 DVLOG(1) << "Input channels per frame: " << input_channels_per_frame_; | |
| 185 | |
| 186 // The hardware must have at least the requested input channels. | |
| 187 if (result != noErr || client_input_channels_ > input_channels_) { | |
| 188 LOG(ERROR) << "AudioDevice does not support requested input channels."; | |
| 189 return false; | |
| 190 } | |
| 191 | |
| 192 // Get output stream configuration. | |
| 193 pa.mSelector = kAudioDevicePropertyStreamConfiguration; | |
| 194 pa.mScope = kAudioDevicePropertyScopeOutput; | |
| 195 pa.mElement = kAudioObjectPropertyElementMaster; | |
| 196 | |
| 197 result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size); | |
| 198 OSSTATUS_DCHECK(result == noErr, result); | |
| 199 | |
| 200 if (result == noErr && size > 0) { | |
| 201 // Allocate storage. | |
| 202 scoped_array<uint8> output_list_storage(new uint8[size]); | |
| 203 AudioBufferList& output_list = | |
| 204 *reinterpret_cast<AudioBufferList*>(output_list_storage.get()); | |
| 205 | |
| 206 result = AudioObjectGetPropertyData( | |
| 207 device_, | |
| 208 &pa, | |
| 209 0, | |
| 210 0, | |
| 211 &size, | |
| 212 &output_list); | |
| 213 OSSTATUS_DCHECK(result == noErr, result); | |
| 214 | |
| 215 if (result == noErr) { | |
| 216 // Determine number of output channels. | |
| 217 output_channels_per_frame_ = output_list.mBuffers[0].mNumberChannels; | |
| 218 if (output_channels_per_frame_ == 1 && output_list.mNumberBuffers > 1) { | |
| 219 // Non-interleaved. | |
| 220 output_channels_ = output_list.mNumberBuffers; | |
| 221 } else { | |
| 222 // Interleaved. | |
| 223 output_channels_ = output_channels_per_frame_; | |
| 224 } | |
| 225 } | |
| 226 } | |
| 227 | |
| 228 DVLOG(1) << "Output channels: " << output_channels_; | |
| 229 DVLOG(1) << "Output channels per frame: " << output_channels_per_frame_; | |
| 230 | |
| 231 // The hardware must have at least the requested output channels. | |
| 232 if (result != noErr || | |
| 233 output_channels_ < static_cast<int>(format_.mChannelsPerFrame)) { | |
| 234 LOG(ERROR) << "AudioDevice does not support requested output channels."; | |
| 235 return false; | |
| 236 } | |
| 237 | |
| 238 // Setup the I/O proc. | |
| 239 result = AudioDeviceCreateIOProcID(device_, RenderProc, this, &io_proc_id_); | |
| 240 if (result != noErr) { | |
| 241 LOG(ERROR) << "Error creating IOProc."; | |
| 242 return false; | |
| 243 } | |
| 244 | |
| 245 return true; | |
| 246 } | |
| 247 | |
void AudioHardwareUnifiedStream::Close() {
  // Callers must Stop() before Close().
  DCHECK(!is_playing_);

  OSStatus result = AudioDeviceDestroyIOProcID(device_, io_proc_id_);
  OSSTATUS_DCHECK(result == noErr, result);

  // Reset handles so the destructor's DCHECK on |device_| passes.
  io_proc_id_ = 0;
  device_ = kAudioObjectUnknown;

  // Inform the audio manager that we have been closed. This can cause our
  // destruction.
  manager_->ReleaseOutputStream(this);
}
| 261 | |
| 262 void AudioHardwareUnifiedStream::Start(AudioSourceCallback* callback) { | |
| 263 DCHECK(callback); | |
| 264 DCHECK_NE(device_, kAudioObjectUnknown); | |
| 265 DCHECK(!is_playing_); | |
| 266 if (device_ == kAudioObjectUnknown || is_playing_) | |
| 267 return; | |
| 268 | |
| 269 source_ = callback; | |
| 270 | |
| 271 OSStatus result = AudioDeviceStart(device_, io_proc_id_); | |
| 272 OSSTATUS_DCHECK(result == noErr, result); | |
| 273 | |
| 274 if (result == noErr) | |
| 275 is_playing_ = true; | |
| 276 } | |
| 277 | |
void AudioHardwareUnifiedStream::Stop() {
  // Idempotent: a second Stop() (or Stop() before Start()) is a no-op.
  if (!is_playing_)
    return;

  // Clear the source first so the render callback stops pulling on it.
  source_ = NULL;

  if (device_ != kAudioObjectUnknown) {
    OSStatus result = AudioDeviceStop(device_, io_proc_id_);
    // NOTE(review): |result| is only consumed by the DCHECK below; a reviewer
    // flagged a possible unused-variable warning in non-DCHECK builds —
    // confirm against the OSSTATUS_DCHECK definition.
    OSSTATUS_DCHECK(result == noErr, result);
  }

  is_playing_ = false;
}
| 291 | |
void AudioHardwareUnifiedStream::SetVolume(double volume) {
  // Only the cached value is updated; the hardware volume property is not yet
  // applied (see TODO).
  volume_ = static_cast<float>(volume);
  // TODO(crogers): set volume property
}
| 296 | |
void AudioHardwareUnifiedStream::GetVolume(double* volume) {
  // Returns the cached volume set via SetVolume(); defaults to 1.0f.
  *volume = volume_;
}
| 300 | |
| 301 // Pulls on our provider with optional input, asking it to render output. | |
| 302 // Note to future hackers of this function: Do not add locks here because this | |
| 303 // is running on a real-time thread (for low-latency). | |
| 304 OSStatus AudioHardwareUnifiedStream::Render( | |
| 
 
no longer working on chromium
2012/09/11 15:40:28
This function will only return noErr, how do you t
 
Chris Rogers
2012/09/14 19:17:59
It's probably fine either way, but leaving it beca
 
 | |
| 305 AudioDeviceID device, | |
| 306 const AudioTimeStamp* now, | |
| 307 const AudioBufferList* input_data, | |
| 308 const AudioTimeStamp* input_time, | |
| 309 AudioBufferList* output_data, | |
| 310 const AudioTimeStamp* output_time) { | |
| 311 // Convert the input data accounting for possible interleaving. | |
| 312 // TODO(crogers): it's better to simply memcpy() if source is already planar. | |
| 313 if (input_channels_ >= client_input_channels_) { | |
| 314 for (int channel_index = 0; channel_index < client_input_channels_; | |
| 315 ++channel_index) { | |
| 316 float* source; | |
| 317 | |
| 318 int source_channel_index = channel_index; | |
| 319 | |
| 320 if (input_channels_per_frame_ > 1) { | |
| 321 // Interleaved. | |
| 322 source = static_cast<float*>(input_data->mBuffers[0].mData) + | |
| 323 source_channel_index; | |
| 324 } else { | |
| 325 // Non-interleaved. | |
| 326 source = static_cast<float*>( | |
| 327 input_data->mBuffers[source_channel_index].mData); | |
| 328 } | |
| 329 | |
| 330 float* p = input_bus_->channel(channel_index); | |
| 331 for (int i = 0; i < number_of_frames_; ++i) { | |
| 332 p[i] = *source; | |
| 333 source += input_channels_per_frame_; | |
| 334 } | |
| 335 } | |
| 336 } else if (input_channels_) { | |
| 337 input_bus_->Zero(); | |
| 338 } | |
| 339 | |
| 340 // Give the client optional input data and have it render the output data. | |
| 341 source_->OnMoreIOData(input_bus_.get(), | |
| 342 output_bus_.get(), | |
| 343 AudioBuffersState(0, 0)); | |
| 344 | |
| 345 // TODO(crogers): handle final Core Audio 5.1 layout for 5.1 audio. | |
| 346 | |
| 347 // Handle interleaving as necessary. | |
| 348 // TODO(crogers): it's better to simply memcpy() if dest is already planar. | |
| 349 | |
| 350 for (unsigned channel_index = 0; channel_index < format_.mChannelsPerFrame; | |
| 
 
scherkus (not reviewing)
2012/09/11 12:22:25
s/unsigned/int/
 
Chris Rogers
2012/09/14 19:17:59
Done.
 
 | |
| 351 ++channel_index) { | |
| 352 float* dest; | |
| 353 | |
| 354 unsigned dest_channel_index = channel_index; | |
| 
 
scherkus (not reviewing)
2012/09/11 12:22:25
s/unsigned/int/
 
Chris Rogers
2012/09/14 19:17:59
Done.
 
 | |
| 355 | |
| 356 if (output_channels_per_frame_ > 1) { | |
| 357 // Interleaved. | |
| 358 dest = static_cast<float*>(output_data->mBuffers[0].mData) + | |
| 359 dest_channel_index; | |
| 360 } else { | |
| 361 // Non-interleaved. | |
| 362 dest = static_cast<float*>( | |
| 363 output_data->mBuffers[dest_channel_index].mData); | |
| 364 } | |
| 365 | |
| 366 float* p = output_bus_->channel(channel_index); | |
| 367 for (int i = 0; i < number_of_frames_; ++i) { | |
| 368 *dest = p[i]; | |
| 369 dest += output_channels_per_frame_; | |
| 370 } | |
| 371 } | |
| 372 | |
| 373 return noErr; | |
| 374 } | |
| 375 | |
// Static AudioDeviceIOProc trampoline registered via AudioDeviceCreateIOProcID
// in Open(); |user_data| is the AudioHardwareUnifiedStream instance.
OSStatus AudioHardwareUnifiedStream::RenderProc(
    AudioDeviceID device,
    const AudioTimeStamp* now,
    const AudioBufferList* input_data,
    const AudioTimeStamp* input_time,
    AudioBufferList* output_data,
    const AudioTimeStamp* output_time,
    void* user_data) {
  AudioHardwareUnifiedStream* audio_output =
      static_cast<AudioHardwareUnifiedStream*>(user_data);
  DCHECK(audio_output);
  // Should never happen; bail with a generic error rather than crashing the
  // real-time audio thread.
  if (!audio_output)
    return -1;

  return audio_output->Render(
      device,
      now,
      input_data,
      input_time,
      output_data,
      output_time);
}

}  // namespace media
| OLD | NEW |