Chromium Code Reviews

Unified Diff: media/audio/win/audio_low_latency_output_win.cc

Issue 10823100: Adds support for multi-channel output audio for the low-latency path in Windows. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years, 5 months ago
Index: media/audio/win/audio_low_latency_output_win.cc
diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc
index 65ed8be35ed262630c51fe0588c21ab65b172242..1f2ddaf8c7c32326bdf98929f0e727e39013fe57 100644
--- a/media/audio/win/audio_low_latency_output_win.cc
+++ b/media/audio/win/audio_low_latency_output_win.cc
@@ -21,6 +21,43 @@ using base::win::ScopedCoMem;
namespace media {
+bool ChannelUpMix(void* input,
tommi (sloooow) - chröme 2012/07/31 21:39:30 I guess this will accept in/out layout parameters
+ void* output,
+ int in_channels,
+ int out_channels,
+ size_t number_of_input_bytes) {
+ DCHECK(input);
+ DCHECK(output);
+ DCHECK_GT(out_channels, in_channels);
+
+ // TODO(henrika): we only support 16-bit samples currently.
+ int16* in16 = static_cast<int16*>(input);
tommi (sloooow) - chröme 2012/07/31 21:39:30 reinterpret_cast
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Done.
+ int16* out16 = static_cast<int16*>(output);
+
+ if (in_channels == 2) {
tommi (sloooow) - chröme 2012/07/31 21:39:30 and here check for in_layout == STEREO?
henrika (OOO until Aug 14) 2012/08/01 16:11:09 see comment below
+ int number_of_input_stereo_samples = (number_of_input_bytes >> 2);
+ // 2 -> N.1 up-mixing where N=out_channels-1.
+ // See http://www.w3.org/TR/webaudio/#UpMix-sub for details.
+ for (int i = 0; i < number_of_input_stereo_samples; i++) {
tommi (sloooow) - chröme 2012/07/31 21:39:30 ++i
tommi (sloooow) - chröme 2012/07/31 21:39:30 as discussed offline, when it comes time to do thi
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Done.
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Will add a TODO() on that one for now.
+ // Copy Front Left and Front Right channels as is.
tommi (sloooow) - chröme 2012/07/31 21:39:30 I know this is just a start, so if I may make a su
henrika (OOO until Aug 14) 2012/08/01 16:11:09 I actually did something like this initially but t
+ out16[0] = in16[0];
+ out16[1] = in16[1];
+
+ // Set all surround channels (and LFE) to zero.
+ for (int n = 2; n < out_channels; n++) {
+ out16[n] = 0;
+ }
+
+ in16 += 2;
+ out16 += out_channels;
+ }
+ } else {
+ LOG(ERROR) << "Up-mixing is not supported.";
+ return false;
+ }
+ return true;
+}
+
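[Note] For illustration, a minimal sketch of what a call to ChannelUpMix() does for a stereo-to-7.1 up-mix; the buffers and sample values are hypothetical, not taken from the patch:

    // Hypothetical example: up-mix one stereo frame (16-bit samples) to
    // eight channels (7.1).
    int16 stereo_in[2] = { 1000, -1000 };  // front left, front right
    int16 surround_out[8] = { 0 };
    ChannelUpMix(stereo_in, surround_out,
                 2 /* in_channels */, 8 /* out_channels */,
                 sizeof(stereo_in) /* number_of_input_bytes */);
    // surround_out == { 1000, -1000, 0, 0, 0, 0, 0, 0 }: the front pair is
    // copied as is and all remaining channels (center, LFE, surrounds) are
    // silenced, matching the W3C up-mix rule referenced in the code.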
// static
AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
@@ -35,6 +72,7 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
: com_init_(ScopedCOMInitializer::kMTA),
creating_thread_id_(base::PlatformThread::CurrentId()),
manager_(manager),
+ client_audio_parameters_(params),
scherkus (not reviewing) 2012/08/01 00:14:05 note: this isn't used anywhere
henrika (OOO until Aug 14) 2012/08/01 16:11:09 It is used in call to ChannelUpMix() to feed in th
render_thread_(NULL),
opened_(false),
started_(false),
@@ -43,6 +81,9 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
endpoint_buffer_size_frames_(0),
device_role_(device_role),
share_mode_(GetShareMode()),
+ endpoint_channel_count_(HardwareChannelCount()), // <=> default device
scherkus (not reviewing) 2012/08/01 00:14:05 can't these be derived from format_? even though
henrika (OOO until Aug 14) 2012/08/01 16:11:09 I was able to remove this member by doing almost a
+ endpoint_channel_config_(ChannelConfig()), // <=> default device
scherkus (not reviewing) 2012/08/01 00:14:05 this is only used to set format_.dwChannelMask, wh
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Removed.
+ channel_factor_(0),
num_written_frames_(0),
source_(NULL) {
CHECK(com_init_.succeeded());
@@ -56,22 +97,49 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
VLOG(1) << ">> Note that EXCLUSIVE MODE is enabled <<";
}
- // Set up the desired render format specified by the client.
- format_.nSamplesPerSec = params.sample_rate();
- format_.wFormatTag = WAVE_FORMAT_PCM;
- format_.wBitsPerSample = params.bits_per_sample();
- format_.nChannels = params.channels();
- format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
- format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
- format_.cbSize = 0;
+ // It is possible to set the number of channels in |params| to a lower value
+ // than we use as the internal number of audio channels when the audio stream
+ // is opened. If this mode (channel_factor_ > 1) is set, the native audio
+ // layer will expect a larger number of channels in the interleaved audio
+ // stream and a channel up-mix will be performed after the OnMoreData()
+ // callback to compensate for the lower number of channels provided by the
+ // audio source.
+ // Example: params.channels() is 2 and endpoint_channel_count() is 8 =>
+ // the audio stream is opened up in 7.1 surround mode but the source only
+ // provides a stereo signal as input, i.e., a stereo up-mix (2 -> 7.1) will
+ // take place before sending the stream to the audio driver.
+ channel_factor_ = endpoint_channel_count() / params.channels();
scherkus (not reviewing) 2012/08/01 00:14:05 hmmm if you end up keeping client_audio_parameters
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Added client_channel_count_ instead and turned cha
+ DCHECK_GE(channel_factor_, 1) << "Unsupported channel count.";
+ DVLOG(1) << "client channels: " << params.channels();
+ DVLOG(1) << "channel factor: " << channel_factor_;
+
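[Note] To make the arithmetic concrete, a worked example assuming a stereo client and an 8-channel (7.1) endpoint with 16-bit samples and 480-frame buffers, i.e. the scenario described in the comment above (the numbers are illustrative):

    // channel_factor_            = 8 / 2                    = 4
    // nBlockAlign                = (16 / 8) * 8             = 16 bytes/frame
    // params.GetBytesPerBuffer() = 480 * 2 * (16 / 8)       = 1920 bytes
    // packet_size_bytes_         = 4 * 1920                 = 7680 bytes
    // packet_size_frames_        = 7680 / 16                = 480 frames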
+ // Set up the desired render format specified by the client. We use the
+ // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering
+ // and high precision data can be supported.
+
+ // Begin with the WAVEFORMATEX structure that specifies the basic format.
+ WAVEFORMATEX* format = &format_.Format;
+ format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ format->nChannels = endpoint_channel_count();
+ format->nSamplesPerSec = params.sample_rate();
+ format->wBitsPerSample = params.bits_per_sample();
+ format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
+ format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
+ format->cbSize = 22;
tommi (sloooow) - chröme 2012/07/31 21:39:30 22? use sizeof? could also move this to the top
henrika (OOO until Aug 14) 2012/08/01 16:11:09 It is actually "MSDN-standard" to hard code 22 her
+
+ // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
+ format_.Samples.wValidBitsPerSample = params.bits_per_sample();
+ format_.dwChannelMask = endpoint_channel_config();
+ format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
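[Note] Regarding the reviewer's question about the hard-coded 22: for WAVE_FORMAT_EXTENSIBLE, cbSize must equal the number of bytes the extensible structure appends to the base WAVEFORMATEX (Samples, dwChannelMask, and the SubFormat GUID). That relationship could be documented with a compile-time check along these lines (a sketch, not part of the patch):

    // 22 == sizeof Samples (2) + dwChannelMask (4) + SubFormat GUID (16).
    COMPILE_ASSERT(sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX) == 22,
                   cbSize_must_cover_the_extensible_extension);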
// Size in bytes of each audio frame.
- frame_size_ = format_.nBlockAlign;
+ frame_size_ = format->nBlockAlign;
// Store size (in different units) of audio packets which we expect to
// get from the audio endpoint device in each render event.
- packet_size_frames_ = params.GetBytesPerBuffer() / format_.nBlockAlign;
- packet_size_bytes_ = params.GetBytesPerBuffer();
+ packet_size_frames_ =
+ (channel_factor_ * params.GetBytesPerBuffer()) / format->nBlockAlign;
+ packet_size_bytes_ = channel_factor_ * params.GetBytesPerBuffer();
packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate();
DVLOG(1) << "Number of bytes per audio frame : " << frame_size_;
DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
@@ -293,7 +361,119 @@ void WASAPIAudioOutputStream::GetVolume(double* volume) {
}
// static
+int WASAPIAudioOutputStream::HardwareChannelCount() {
+ // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the
+ // number of channels and the mapping of channels to speakers for
+ // multichannel devices.
+ base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex;
+ HRESULT hr = GetMixFormat(
+ eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex));
tommi (sloooow) - chröme 2012/07/31 21:39:30 the reinterpret_cast shouldn't be needed. operato
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Tell that to the compiler ;-) error C2664: 'media
+ if (FAILED(hr))
+ return 0;
+
+ // Number of channels in the stream. Corresponds to the number of bits
+ // set in the dwChannelMask.
+ DVLOG(2) << "endpoint channels: " << format_ex->Format.nChannels;
+
+ return static_cast<int>(format_ex->Format.nChannels);
+}
+
+// static
+ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() {
+ return ChannelConfigToChromeChannelLayout(ChannelConfig());
+}
+
+// static
+uint32 WASAPIAudioOutputStream::ChannelConfig() {
+ // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the
+ // number of channels and the mapping of channels to speakers for
+ // multichannel devices.
+ base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex;
+ HRESULT hr = GetMixFormat(
+ eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex));
tommi (sloooow) - chröme 2012/07/31 21:39:30 same here
henrika (OOO until Aug 14) 2012/08/01 16:11:09 see above
+ if (FAILED(hr))
+ return 0;
+
+ // The dwChannelMask member specifies which channels are present in the
+ // multichannel stream. The least significant bit corresponds to the
+ // front left speaker, the next least significant bit corresponds to the
+ // front right speaker, and so on.
+ // See http://msdn.microsoft.com/en-us/library/windows/desktop/dd757714(v=vs.85).aspx
+ // for more details on the channel mapping.
+ DVLOG(2) << "dwChannelMask: 0x" << std::hex << format_ex->dwChannelMask;
+
+ // See http://en.wikipedia.org/wiki/Surround_sound for more details on
+ // how to name various speaker configurations. The list below is not complete.
+ std::string speaker_config("Undefined");
tommi (sloooow) - chröme 2012/07/31 21:39:30 move this into an #ifndef NDEBUG? also, you don't
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Done.
+ if (format_ex->dwChannelMask == KSAUDIO_SPEAKER_MONO)
tommi (sloooow) - chröme 2012/07/31 21:39:30 switch()?
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Done.
+ speaker_config = "Mono";
+ else if (format_ex->dwChannelMask == KSAUDIO_SPEAKER_STEREO)
+ speaker_config = "Stereo";
+ else if (format_ex->dwChannelMask == KSAUDIO_SPEAKER_5POINT1_SURROUND)
+ speaker_config = "5.1 surround";
+ else if (format_ex->dwChannelMask == KSAUDIO_SPEAKER_5POINT1)
+ speaker_config = "5.1";
+  else if (format_ex->dwChannelMask == KSAUDIO_SPEAKER_7POINT1_SURROUND)
+ speaker_config = "7.1 surround";
+ else if (format_ex->dwChannelMask == KSAUDIO_SPEAKER_7POINT1)
+ speaker_config = "7.1";
+ DVLOG(2) << "speaker configuration: " << speaker_config;
+
+ return static_cast<uint32>(format_ex->dwChannelMask);
+}
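[Note] A sketch of the switch-based form the reviewer asked for (and which the author adopted per the "Done." reply), wrapped in #ifndef NDEBUG so the lookup is debug-only as suggested:

    #ifndef NDEBUG
      const char* speaker_config = "Undefined";
      switch (format_ex->dwChannelMask) {
        case KSAUDIO_SPEAKER_MONO:
          speaker_config = "Mono";
          break;
        case KSAUDIO_SPEAKER_STEREO:
          speaker_config = "Stereo";
          break;
        case KSAUDIO_SPEAKER_5POINT1:
          speaker_config = "5.1";
          break;
        case KSAUDIO_SPEAKER_5POINT1_SURROUND:
          speaker_config = "5.1 surround";
          break;
        case KSAUDIO_SPEAKER_7POINT1:
          speaker_config = "7.1";
          break;
        case KSAUDIO_SPEAKER_7POINT1_SURROUND:
          speaker_config = "7.1 surround";
          break;
      }
      DVLOG(2) << "speaker configuration: " << speaker_config;
    #endif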
+
+// static
+ChannelLayout WASAPIAudioOutputStream::ChannelConfigToChromeChannelLayout(
+ uint32 config) {
+ switch (config) {
+ case KSAUDIO_SPEAKER_DIRECTOUT:
+ return CHANNEL_LAYOUT_NONE;
+ case KSAUDIO_SPEAKER_MONO:
+ return CHANNEL_LAYOUT_MONO;
+ case KSAUDIO_SPEAKER_STEREO:
+ return CHANNEL_LAYOUT_STEREO;
+ case KSAUDIO_SPEAKER_QUAD:
+ return CHANNEL_LAYOUT_QUAD;
+ case KSAUDIO_SPEAKER_SURROUND:
+ return CHANNEL_LAYOUT_4_0;
+ case KSAUDIO_SPEAKER_5POINT1:
+ return CHANNEL_LAYOUT_5_1_BACK;
+ case KSAUDIO_SPEAKER_5POINT1_SURROUND:
+ return CHANNEL_LAYOUT_5_1;
+ case KSAUDIO_SPEAKER_7POINT1:
+ return CHANNEL_LAYOUT_7_1_WIDE;
+ case KSAUDIO_SPEAKER_7POINT1_SURROUND:
+ return CHANNEL_LAYOUT_7_1;
+ default:
+ DVLOG(1) << "Unsupported channel layout: " << config;
tommi (sloooow) - chröme 2012/07/31 21:39:30 add break;
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Done.
+ }
+ return CHANNEL_LAYOUT_UNSUPPORTED;
+}
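[Note] A brief usage example of the mapping (hypothetical caller):

    // E.g., a 5.1 surround endpoint mask maps to Chrome's CHANNEL_LAYOUT_5_1.
    ChannelLayout layout =
        WASAPIAudioOutputStream::ChannelConfigToChromeChannelLayout(
            KSAUDIO_SPEAKER_5POINT1_SURROUND);
    DCHECK_EQ(layout, CHANNEL_LAYOUT_5_1);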
+
+// static
int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) {
+ base::win::ScopedCoMem<WAVEFORMATEX> format;
+ HRESULT hr = GetMixFormat(device_role, &format);
+ if (FAILED(hr))
+ return 0;
+
+ DVLOG(2) << "nSamplesPerSec: " << format->nSamplesPerSec;
+ return static_cast<int>(format->nSamplesPerSec);
+}
+
+// static
+HRESULT WASAPIAudioOutputStream::GetMixFormat(ERole device_role,
+ WAVEFORMATEX** device_format) {
+ // Note that we are using the IAudioClient::GetMixFormat() API to get the
+ // device format in this function. It is in fact possible to be "more native",
+ // and ask the endpoint device directly for its properties. Given a reference
+ // to the IMMDevice interface of an endpoint object, a client can obtain a
+ // reference to the endpoint object's property store by calling the
+ // IMMDevice::OpenPropertyStore() method. However, I have not been able to
+ // access any valuable information using this method on my HP Z600 desktop,
+ // hence it feels more appropriate to use the IAudioClient::GetMixFormat()
+ // approach instead.
+
// Calling this function only makes sense for shared mode streams, since
// if the device will be opened in exclusive mode, then the application
// specified format is used instead. However, the result of this method can
@@ -331,21 +511,13 @@ int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) {
CLSCTX_INPROC_SERVER,
NULL,
audio_client.ReceiveVoid());
- if (FAILED(hr)) {
- NOTREACHED() << "error code: " << std::hex << hr;
- return 0.0;
- }
-
- // Retrieve the stream format that the audio engine uses for its internal
- // processing of shared-mode streams.
- base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
- hr = audio_client->GetMixFormat(&audio_engine_mix_format);
- if (FAILED(hr)) {
- NOTREACHED() << "error code: " << std::hex << hr;
- return 0.0;
+ DCHECK(SUCCEEDED(hr)) << "Failed to activate device: " << std::hex << hr;
+ if (SUCCEEDED(hr)) {
+ hr = audio_client->GetMixFormat(device_format);
+ DCHECK(SUCCEEDED(hr)) << "GetMixFormat: " << std::hex << hr;
}
- return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
+ return hr;
}
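[Note] The "more native" alternative mentioned in the comment at the top of GetMixFormat() would read the format from the endpoint's property store. A minimal sketch of that approach, with error handling elided and |endpoint_device| assumed to have been obtained via IMMDeviceEnumerator::GetDefaultAudioEndpoint():

    // Sketch only: query the endpoint's property store instead of calling
    // IAudioClient::GetMixFormat().
    base::win::ScopedComPtr<IPropertyStore> properties;
    endpoint_device->OpenPropertyStore(STGM_READ, properties.Receive());
    PROPVARIANT value;
    PropVariantInit(&value);
    properties->GetValue(PKEY_AudioEngine_DeviceFormat, &value);
    const WAVEFORMATEX* device_format =
        reinterpret_cast<const WAVEFORMATEX*>(value.blob.pBlobData);
    // ... inspect |device_format| here ...
    PropVariantClear(&value);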
void WASAPIAudioOutputStream::Run() {
@@ -478,7 +650,7 @@ void WASAPIAudioOutputStream::Run() {
if (SUCCEEDED(hr)) {
// Stream position of the sample that is currently playing
// through the speaker.
- double pos_sample_playing_frames = format_.nSamplesPerSec *
+ double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
(static_cast<double>(position) / device_frequency);
// Stream position of the last sample written to the endpoint
@@ -499,23 +671,49 @@ void WASAPIAudioOutputStream::Run() {
// time stamp can be used at the client side to compensate for
// the delay between the usage of the delay value and the time
// of generation.
- uint32 num_filled_bytes = source_->OnMoreData(
- audio_data, packet_size_bytes_,
- AudioBuffersState(0, audio_delay_bytes));
-
- // Perform in-place, software-volume adjustments.
- media::AdjustVolume(audio_data,
- num_filled_bytes,
- format_.nChannels,
- format_.wBitsPerSample >> 3,
- volume_);
-
- // Zero out the part of the packet which has not been filled by
- // the client. Using silence is the least bad option in this
- // situation.
- if (num_filled_bytes < packet_size_bytes_) {
- memset(&audio_data[num_filled_bytes], 0,
- (packet_size_bytes_ - num_filled_bytes));
+
+ // TODO(henrika): improve comments about possible upmixing here...
+
+ uint32 num_filled_bytes = 0;
+
+ if (channel_factor_ == 1) {
+ // Case I: no up-mixing.
+ num_filled_bytes = source_->OnMoreData(
+ audio_data, packet_size_bytes_,
+ AudioBuffersState(0, audio_delay_bytes));
+
+ // Perform in-place, software-volume adjustments.
+ media::AdjustVolume(audio_data,
+ num_filled_bytes,
+ format_.Format.nChannels,
+ format_.Format.wBitsPerSample >> 3,
+ volume_);
+
+ // Zero out the part of the packet which has not been filled by
+ // the client. Using silence is the least bad option in this
+ // situation.
+ if (num_filled_bytes < packet_size_bytes_) {
+ memset(&audio_data[num_filled_bytes], 0,
+ (packet_size_bytes_ - num_filled_bytes));
tommi (sloooow) - chröme 2012/07/31 21:39:30 indent? (looks off by 1)
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Done.
+ }
+ } else {
+ // Case II: up-mixing.
+ const int audio_source_size_bytes =
+ packet_size_bytes_ / channel_factor_;
+ scoped_array<uint8> buffer;
+ buffer.reset(new uint8[audio_source_size_bytes]);
+
+ num_filled_bytes = source_->OnMoreData(
+ buffer.get(), audio_source_size_bytes,
+ AudioBuffersState(0, audio_delay_bytes));
+
+ ChannelUpMix(buffer.get(),
+ &audio_data[0],
+ client_channel_count(),
+ endpoint_channel_count(),
+ num_filled_bytes);
+
+ // TODO(henrika): take care of zero-out for this case as well.
}
// Release the buffer space acquired in the GetBuffer() call.
@@ -605,7 +803,8 @@ HRESULT WASAPIAudioOutputStream::ActivateRenderDevice() {
// Retrieve the stream format that the audio engine uses for its internal
// processing/mixing of shared-mode streams.
audio_engine_mix_format_.Reset(NULL);
- hr = audio_client->GetMixFormat(&audio_engine_mix_format_);
+ hr = audio_client->GetMixFormat(
+ reinterpret_cast<WAVEFORMATEX**>(&audio_engine_mix_format_));
tommi (sloooow) - chröme 2012/07/31 21:39:30 no cast should be necessary
henrika (OOO until Aug 14) 2012/08/01 16:11:09 see previous comment.
if (SUCCEEDED(hr)) {
audio_client_ = audio_client;
@@ -622,10 +821,10 @@ bool WASAPIAudioOutputStream::DesiredFormatIsSupported() {
// which is stored in the |audio_engine_mix_format_| member and it is also
// possible to receive a proposed (closest) format if the current format is
// not supported.
- base::win::ScopedCoMem<WAVEFORMATEX> closest_match;
- HRESULT hr = audio_client_->IsFormatSupported(share_mode(),
- &format_,
- &closest_match);
+ base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
+ HRESULT hr = audio_client_->IsFormatSupported(
+ share_mode(), reinterpret_cast<WAVEFORMATEX*>(&format_),
tommi (sloooow) - chröme 2012/07/31 21:39:30 don't cast format_. instead use operatorT*() and r
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Same comment as before. IsFormatSupported takes WA
+ reinterpret_cast<WAVEFORMATEX**>(&closest_match));
tommi (sloooow) - chröme 2012/07/31 21:39:30 cast not needed
henrika (OOO until Aug 14) 2012/08/01 16:11:09 ditto
// This log can only be triggered for shared mode.
DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
@@ -633,10 +832,10 @@ bool WASAPIAudioOutputStream::DesiredFormatIsSupported() {
// This log can be triggered both for shared and exclusive modes.
DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
if (hr == S_FALSE) {
- DVLOG(1) << "wFormatTag : " << closest_match->wFormatTag;
- DVLOG(1) << "nChannels : " << closest_match->nChannels;
- DVLOG(1) << "nSamplesPerSec: " << closest_match->nSamplesPerSec;
- DVLOG(1) << "wBitsPerSample: " << closest_match->wBitsPerSample;
+ DVLOG(1) << "wFormatTag : " << closest_match->Format.wFormatTag;
+ DVLOG(1) << "nChannels : " << closest_match->Format.nChannels;
+ DVLOG(1) << "nSamplesPerSec: " << closest_match->Format.nSamplesPerSec;
+ DVLOG(1) << "wBitsPerSample: " << closest_match->Format.wBitsPerSample;
}
return (hr == S_OK);
@@ -727,11 +926,11 @@ HRESULT WASAPIAudioOutputStream::SharedModeInitialization() {
// The exact details are yet to be determined based on tests with different
// audio clients.
int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5);
- if (audio_engine_mix_format_->nSamplesPerSec == 48000) {
+ if (audio_engine_mix_format_->Format.nSamplesPerSec == 48000) {
// Initial tests have shown that we have to add 10 ms extra to
// ensure that we don't run empty for any packet size.
glitch_free_buffer_size_ms += 10;
- } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) {
+ } else if (audio_engine_mix_format_->Format.nSamplesPerSec == 44100) {
// Initial tests have shown that we have to add 20 ms extra to
// ensure that we don't run empty for any packet size.
glitch_free_buffer_size_ms += 20;
@@ -755,7 +954,7 @@ HRESULT WASAPIAudioOutputStream::SharedModeInitialization() {
AUDCLNT_STREAMFLAGS_NOPERSIST,
requested_buffer_duration,
0,
- &format_,
+ reinterpret_cast<WAVEFORMATEX*>(&format_),
tommi (sloooow) - chröme 2012/07/31 21:39:30 use operator T*()
henrika (OOO until Aug 14) 2012/08/01 16:11:09 ditto
NULL);
return hr;
}
@@ -763,7 +962,7 @@ HRESULT WASAPIAudioOutputStream::SharedModeInitialization() {
HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization() {
DCHECK_EQ(share_mode(), AUDCLNT_SHAREMODE_EXCLUSIVE);
- float f = (1000.0 * packet_size_frames_) / format_.nSamplesPerSec;
+ float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec;
REFERENCE_TIME requested_buffer_duration =
static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5);
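[Note] One REFERENCE_TIME unit is 100 nanoseconds, so a duration in milliseconds is converted by multiplying by 10000. A worked example with assumed numbers:

    // Assumed: packet_size_frames_ == 480, nSamplesPerSec == 48000.
    // f = (1000.0 * 480) / 48000 = 10.0 ms
    // requested_buffer_duration = 10.0 * 10000.0 + 0.5 -> 100000
    //   (100000 * 100 ns = 10 ms)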
@@ -780,7 +979,7 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization() {
AUDCLNT_STREAMFLAGS_NOPERSIST,
requested_buffer_duration,
requested_buffer_duration,
- &format_,
+ reinterpret_cast<WAVEFORMATEX*>(&format_),
tommi (sloooow) - chröme 2012/07/31 21:39:30 operator
henrika (OOO until Aug 14) 2012/08/01 16:11:09 ditto
NULL);
if (FAILED(hr)) {
if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
@@ -794,7 +993,8 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization() {
// Calculate new aligned periodicity. Each unit of reference time
// is 100 nanoseconds.
REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>(
- (10000000.0 * aligned_buffer_size / format_.nSamplesPerSec) + 0.5);
+ (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec)
+ + 0.5);
// It is possible to re-activate and re-initialize the audio client
// at this stage but we bail out with an error code instead and
@@ -834,7 +1034,7 @@ HRESULT WASAPIAudioOutputStream::QueryInterface(REFIID iid, void** object) {
}
STDMETHODIMP WASAPIAudioOutputStream::OnDeviceStateChanged(LPCWSTR device_id,
- DWORD new_state) {
+ DWORD new_state) {
#ifndef NDEBUG
std::string device_name = GetDeviceName(device_id);
std::string device_state;
