| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/audio/mac/audio_low_latency_output_mac.h" | 5 #include "media/audio/mac/audio_low_latency_output_mac.h" |
| 6 | 6 |
| 7 #include <CoreServices/CoreServices.h> | 7 #include <CoreServices/CoreServices.h> |
| 8 | 8 |
| 9 #include "base/basictypes.h" | 9 #include "base/basictypes.h" |
| 10 #include "base/logging.h" | 10 #include "base/logging.h" |
| (...skipping 38 matching lines...) | |
| 49 source_(NULL), | 49 source_(NULL), |
| 50 output_unit_(0), | 50 output_unit_(0), |
| 51 output_device_id_(kAudioObjectUnknown), | 51 output_device_id_(kAudioObjectUnknown), |
| 52 volume_(1), | 52 volume_(1), |
| 53 hardware_latency_frames_(0) { | 53 hardware_latency_frames_(0) { |
| 54 // We must have a manager. | 54 // We must have a manager. |
| 55 DCHECK(manager_); | 55 DCHECK(manager_); |
| 56 // A frame is one sample across all channels. In interleaved audio the per | 56 // A frame is one sample across all channels. In interleaved audio the per |
| 57 // frame fields identify the set of n |channels|. In uncompressed audio, a | 57 // frame fields identify the set of n |channels|. In uncompressed audio, a |
| 58 // packet is always one frame. | 58 // packet is always one frame. |
| 59 format_.mSampleRate = params.sample_rate; | 59 format_.mSampleRate = params.sample_rate(); |
| 60 format_.mFormatID = kAudioFormatLinearPCM; | 60 format_.mFormatID = kAudioFormatLinearPCM; |
| 61 format_.mFormatFlags = kLinearPCMFormatFlagIsPacked | | 61 format_.mFormatFlags = kLinearPCMFormatFlagIsPacked | |
| 62 kLinearPCMFormatFlagIsSignedInteger; | 62 kLinearPCMFormatFlagIsSignedInteger; |
| 63 format_.mBitsPerChannel = params.bits_per_sample; | 63 format_.mBitsPerChannel = params.bits_per_sample(); |
| 64 format_.mChannelsPerFrame = params.channels; | 64 format_.mChannelsPerFrame = params.channels(); |
| 65 format_.mFramesPerPacket = 1; | 65 format_.mFramesPerPacket = 1; |
| 66 format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels) / 8; | 66 format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8; |
| 67 format_.mBytesPerFrame = format_.mBytesPerPacket; | 67 format_.mBytesPerFrame = format_.mBytesPerPacket; |
| 68 format_.mReserved = 0; | 68 format_.mReserved = 0; |
| 69 | 69 |
| 70 // Calculate the number of sample frames per callback. | 70 // Calculate the number of sample frames per callback. |
| 71 number_of_frames_ = params.GetPacketSize() / format_.mBytesPerPacket; | 71 number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket; |
| 72 } | 72 } |
| 73 | 73 |
| 74 AUAudioOutputStream::~AUAudioOutputStream() { | 74 AUAudioOutputStream::~AUAudioOutputStream() { |
| 75 } | 75 } |
| 76 | 76 |
| 77 bool AUAudioOutputStream::Open() { | 77 bool AUAudioOutputStream::Open() { |
| 78 // Obtain the current output device selected by the user. | 78 // Obtain the current output device selected by the user. |
| 79 UInt32 size = sizeof(output_device_id_); | 79 UInt32 size = sizeof(output_device_id_); |
| 80 AudioObjectPropertyAddress default_output_device_address = { | 80 AudioObjectPropertyAddress default_output_device_address = { |
| 81 kAudioHardwarePropertyDefaultOutputDevice, | 81 kAudioHardwarePropertyDefaultOutputDevice, |
| (...skipping 163 matching lines...) | |
| 245 AudioBufferList* io_data) { | 245 AudioBufferList* io_data) { |
| 246 AUAudioOutputStream* audio_output = | 246 AUAudioOutputStream* audio_output = |
| 247 static_cast<AUAudioOutputStream*>(user_data); | 247 static_cast<AUAudioOutputStream*>(user_data); |
| 248 DCHECK(audio_output); | 248 DCHECK(audio_output); |
| 249 if (!audio_output) | 249 if (!audio_output) |
| 250 return -1; | 250 return -1; |
| 251 | 251 |
| 252 return audio_output->Render(number_of_frames, io_data, output_time_stamp); | 252 return audio_output->Render(number_of_frames, io_data, output_time_stamp); |
| 253 } | 253 } |
| 254 | 254 |
| 255 double AUAudioOutputStream::HardwareSampleRate() { | 255 int AUAudioOutputStream::HardwareSampleRate() { |
| 256 // Determine the default output device's sample-rate. | 256 // Determine the default output device's sample-rate. |
| 257 AudioDeviceID device_id = kAudioObjectUnknown; | 257 AudioDeviceID device_id = kAudioObjectUnknown; |
| 258 UInt32 info_size = sizeof(device_id); | 258 UInt32 info_size = sizeof(device_id); |
| 259 | 259 |
| 260 AudioObjectPropertyAddress default_output_device_address = { | 260 AudioObjectPropertyAddress default_output_device_address = { |
| 261 kAudioHardwarePropertyDefaultOutputDevice, | 261 kAudioHardwarePropertyDefaultOutputDevice, |
| 262 kAudioObjectPropertyScopeGlobal, | 262 kAudioObjectPropertyScopeGlobal, |
| 263 kAudioObjectPropertyElementMaster | 263 kAudioObjectPropertyElementMaster |
| 264 }; | 264 }; |
| 265 OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, | 265 OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, |
| (...skipping 17 matching lines...) | |
| 283 result = AudioObjectGetPropertyData(device_id, | 283 result = AudioObjectGetPropertyData(device_id, |
| 284 &nominal_sample_rate_address, | 284 &nominal_sample_rate_address, |
| 285 0, | 285 0, |
| 286 0, | 286 0, |
| 287 &info_size, | 287 &info_size, |
| 288 &nominal_sample_rate); | 288 &nominal_sample_rate); |
| 289 OSSTATUS_DCHECK(result == noErr, result); | 289 OSSTATUS_DCHECK(result == noErr, result); |
| 290 if (result) | 290 if (result) |
| 291 return 0.0; // error | 291 return 0.0; // error |
| 292 | 292 |
| 293 return nominal_sample_rate; | 293 return static_cast<int>(nominal_sample_rate); |
| 294 } | 294 } |
| 295 | 295 |
| 296 double AUAudioOutputStream::GetHardwareLatency() { | 296 double AUAudioOutputStream::GetHardwareLatency() { |
| 297 if (!output_unit_ || output_device_id_ == kAudioObjectUnknown) { | 297 if (!output_unit_ || output_device_id_ == kAudioObjectUnknown) { |
| 298 DLOG(WARNING) << "Audio unit object is NULL or device ID is unknown"; | 298 DLOG(WARNING) << "Audio unit object is NULL or device ID is unknown"; |
| 299 return 0.0; | 299 return 0.0; |
| 300 } | 300 } |
| 301 | 301 |
| 302 // Get audio unit latency. | 302 // Get audio unit latency. |
| 303 Float64 audio_unit_latency_sec = 0.0; | 303 Float64 audio_unit_latency_sec = 0.0; |
| (...skipping 33 matching lines...) | |
| 337 // Get the delay between the moment getting the callback and the scheduled | 337 // Get the delay between the moment getting the callback and the scheduled |
| 338 // time stamp that tells when the data is going to be played out. | 338 // time stamp that tells when the data is going to be played out. |
| 339 UInt64 output_time_ns = AudioConvertHostTimeToNanos( | 339 UInt64 output_time_ns = AudioConvertHostTimeToNanos( |
| 340 output_time_stamp->mHostTime); | 340 output_time_stamp->mHostTime); |
| 341 UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime()); | 341 UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime()); |
| 342 double delay_frames = static_cast<double> | 342 double delay_frames = static_cast<double> |
| 343 (1e-9 * (output_time_ns - now_ns) * format_.mSampleRate); | 343 (1e-9 * (output_time_ns - now_ns) * format_.mSampleRate); |
| 344 | 344 |
| 345 return (delay_frames + hardware_latency_frames_); | 345 return (delay_frames + hardware_latency_frames_); |
| 346 } | 346 } |
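
Note on the constructor hunk: with AudioParameters now exposing accessors, number_of_frames_ is derived from GetBytesPerBuffer() instead of GetPacketSize(), but the arithmetic itself is unchanged. Below is a minimal standalone sketch of that arithmetic, using illustrative 48 kHz / stereo / 16-bit / 10 ms values that are assumptions, not anything taken from this CL:

```cpp
// Illustrative values only; none of these numbers come from the CL.
#include <cstdio>

int main() {
  const int sample_rate = 48000;      // frames per second
  const int channels = 2;             // interleaved stereo
  const int bits_per_sample = 16;     // packed, signed linear PCM
  const int bytes_per_buffer = 1920;  // what GetBytesPerBuffer() would return
                                      // for 480-frame (10 ms) buffers

  // For uncompressed linear PCM one packet is one frame, so bytes-per-packet
  // is just the size of a single interleaved frame.
  const int bytes_per_packet = (bits_per_sample * channels) / 8;        // 4
  const int frames_per_callback = bytes_per_buffer / bytes_per_packet;  // 480

  std::printf("bytes/packet=%d, frames/callback=%d (%.1f ms @ %d Hz)\n",
              bytes_per_packet, frames_per_callback,
              1000.0 * frames_per_callback / sample_rate, sample_rate);
}
```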
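Note on the last hunk: the delay reported to the renderer is the gap between "now" and the buffer's scheduled host time, expressed in frames, plus the hardware latency measured once at open time. A hedged restatement of that computation as a free function, with the member state (format_.mSampleRate, hardware_latency_frames_) passed in as parameters; the function name and signature are illustrative, not part of the CL:

```cpp
#include <CoreAudio/CoreAudio.h>  // AudioTimeStamp
#include <CoreAudio/HostTime.h>   // AudioConvertHostTimeToNanos,
                                  // AudioGetCurrentHostTime

// Illustrative free-function version of the member computation; the name and
// parameter list are assumptions, not part of the CL.
static double PlayoutDelayFrames(const AudioTimeStamp* output_time_stamp,
                                 double sample_rate,
                                 double hardware_latency_frames) {
  // Scheduled playout time of the buffer being rendered, in nanoseconds.
  UInt64 output_time_ns =
      AudioConvertHostTimeToNanos(output_time_stamp->mHostTime);
  // Current host time, in nanoseconds.
  UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());

  // Convert the time until playout from nanoseconds to sample frames.
  double delay_frames = 1e-9 * (output_time_ns - now_ns) * sample_rate;

  // Add the device/unit latency measured in GetHardwareLatency().
  return delay_frames + hardware_latency_frames;
}
```

As in the original code, the subtraction is done on unsigned 64-bit host times, so the scheduled playout time is expected to lie in the future relative to the callback.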