Chromium Code Reviews

Unified Diff: media/audio/win/audio_low_latency_output_win.cc

Issue 10823100: Adds support for multi-channel output audio for the low-latency path in Windows. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years, 4 months ago
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/win/audio_low_latency_output_win.h"

#include <Functiondiscoverykeys_devpkey.h>

#include "base/command_line.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/utf_string_conversions.h"
#include "media/audio/audio_util.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
#include "media/base/media_switches.h"

using base::win::ScopedComPtr;
using base::win::ScopedCOMInitializer;
using base::win::ScopedCoMem;

namespace media {

+ bool ChannelUpMix(void* input,
tommi (sloooow) - chröme 2012/07/31 21:39:30 I guess this will accept in/out layout parameters ...
+                   void* output,
+                   int in_channels,
+                   int out_channels,
+                   size_t number_of_input_bytes) {
+   DCHECK(input);
+   DCHECK(output);
+   DCHECK_GT(out_channels, in_channels);
+
+   // TODO(henrika): we only support 16-bit samples currently.
+   int16* in16 = static_cast<int16*>(input);
tommi (sloooow) - chröme 2012/07/31 21:39:30 reinterpret_cast
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Done.
+   int16* out16 = static_cast<int16*>(output);
+
+   if (in_channels == 2) {
tommi (sloooow) - chröme 2012/07/31 21:39:30 and here check for in_layout == STEREO?
henrika (OOO until Aug 14) 2012/08/01 16:11:09 see comment below
+     int number_of_input_stereo_samples = (number_of_input_bytes >> 2);
+     // 2 -> N.1 up-mixing where N=out_channels-1.
+     // See http://www.w3.org/TR/webaudio/#UpMix-sub for details.
+     for (int i = 0; i < number_of_input_stereo_samples; i++) {
tommi (sloooow) - chröme 2012/07/31 21:39:30 ++i
tommi (sloooow) - chröme 2012/07/31 21:39:30 as discussed offline, when it comes time to do this ...
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Done.
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Will add a TODO() on that one for now.
+       // Copy Front Left and Front Right channels as is.
tommi (sloooow) - chröme 2012/07/31 21:39:30 I know this is just a start, so if I may make a suggestion ...
henrika (OOO until Aug 14) 2012/08/01 16:11:09 I actually did something like this initially but ...
+       out16[0] = in16[0];
+       out16[1] = in16[1];
+
+       // Set all surround channels (and LFE) to zero.
+       for (int n = 2; n < out_channels; n++) {
+         out16[n] = 0;
+       }
+
+       in16 += 2;
+       out16 += out_channels;
+     }
+   } else {
+     LOG(ERROR) << "Up-mixing is not supported.";
+     return false;
+   }
+   return true;
+ }
+
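For reference, a minimal caller-side sketch of how the helper above is driven (illustrative only; the packet size and channel counts are not taken from this patch):

    // Up-mix one 10 ms stereo packet (48 kHz, 16-bit samples) into a
    // 6-channel (5.1) endpoint buffer.
    const int kFrames = 480;                // 10 ms at 48 kHz.
    int16 stereo_in[kFrames * 2] = {0};     // Interleaved L/R source data.
    int16 surround_out[kFrames * 6] = {0};  // Interleaved 5.1 destination.
    ChannelUpMix(stereo_in, surround_out, 2, 6, sizeof(stereo_in));
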
// static
AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
    return AUDCLNT_SHAREMODE_EXCLUSIVE;
  return AUDCLNT_SHAREMODE_SHARED;
}

WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
                                                 const AudioParameters& params,
                                                 ERole device_role)
    : com_init_(ScopedCOMInitializer::kMTA),
      creating_thread_id_(base::PlatformThread::CurrentId()),
      manager_(manager),
+       client_audio_parameters_(params),
scherkus (not reviewing) 2012/08/01 00:14:05 note: this isn't used anywhere
henrika (OOO until Aug 14) 2012/08/01 16:11:09 It is used in call to ChannelUpMix() to feed in the ...
      render_thread_(NULL),
      opened_(false),
      started_(false),
      restart_rendering_mode_(false),
      volume_(1.0),
      endpoint_buffer_size_frames_(0),
      device_role_(device_role),
      share_mode_(GetShareMode()),
+       endpoint_channel_count_(HardwareChannelCount()),  // <=> default device
scherkus (not reviewing) 2012/08/01 00:14:05 can't these be derived from format_? even though ...
henrika (OOO until Aug 14) 2012/08/01 16:11:09 I was able to remove this member by doing almost ...
+       endpoint_channel_config_(ChannelConfig()),  // <=> default device
scherkus (not reviewing) 2012/08/01 00:14:05 this is only used to set format_.dwChannelMask, which ...
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Removed.
+       channel_factor_(0),
      num_written_frames_(0),
      source_(NULL) {
  CHECK(com_init_.succeeded());
  DCHECK(manager_);

  // Load the Avrt DLL if not already loaded. Required to support MMCSS.
  bool avrt_init = avrt::Initialize();
  DCHECK(avrt_init) << "Failed to load the avrt.dll";

  if (share_mode() == AUDCLNT_SHAREMODE_EXCLUSIVE) {
    VLOG(1) << ">> Note that EXCLUSIVE MODE is enabled <<";
  }

-   // Set up the desired render format specified by the client.
-   format_.nSamplesPerSec = params.sample_rate();
-   format_.wFormatTag = WAVE_FORMAT_PCM;
-   format_.wBitsPerSample = params.bits_per_sample();
-   format_.nChannels = params.channels();
-   format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
-   format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
-   format_.cbSize = 0;
+   // It is possible to set the number of channels in |params| to a lower value
+   // than we use as the internal number of audio channels when the audio stream
+   // is opened. If this mode (channel_factor_ > 1) is set, the native audio
+   // layer will expect a larger number of channels in the interleaved audio
+   // stream and a channel up-mix will be performed after the OnMoreData()
+   // callback to compensate for the lower number of channels provided by the
+   // audio source.
+   // Example: params.channels() is 2 and endpoint_channel_count() is 8 =>
+   // the audio stream is opened up in 7.1 surround mode but the source only
+   // provides a stereo signal as input, i.e., a stereo up-mix (2 -> 7.1) will
+   // take place before sending the stream to the audio driver.
+   channel_factor_ = endpoint_channel_count() / params.channels();
scherkus (not reviewing) 2012/08/01 00:14:05 hmmm if you end up keeping client_audio_parameters_ ...
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Added client_channel_count_ instead and turned cha ...
+   DCHECK_GE(channel_factor_, 1) << "Unsupported channel count.";
+   DVLOG(1) << "client channels: " << params.channels();
+   DVLOG(1) << "channel factor: " << channel_factor_;
+
+   // Set up the desired render format specified by the client. We use the
+   // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering
+   // and high precision data can be supported.
+
+   // Begin with the WAVEFORMATEX structure that specifies the basic format.
+   WAVEFORMATEX* format = &format_.Format;
+   format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+   format->nChannels = endpoint_channel_count();
+   format->nSamplesPerSec = params.sample_rate();
+   format->wBitsPerSample = params.bits_per_sample();
+   format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
+   format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
+   format->cbSize = 22;
tommi (sloooow) - chröme 2012/07/31 21:39:30 22? use sizeof? could also move this to the top
henrika (OOO until Aug 14) 2012/08/01 16:11:09 It is actually "MSDN-standard" to hard code 22 here ...
+
+   // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
+   format_.Samples.wValidBitsPerSample = params.bits_per_sample();
+   format_.dwChannelMask = endpoint_channel_config();
+   format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;

  // Size in bytes of each audio frame.
-   frame_size_ = format_.nBlockAlign;
+   frame_size_ = format->nBlockAlign;

  // Store size (in different units) of audio packets which we expect to
  // get from the audio endpoint device in each render event.
-   packet_size_frames_ = params.GetBytesPerBuffer() / format_.nBlockAlign;
-   packet_size_bytes_ = params.GetBytesPerBuffer();
+   packet_size_frames_ =
+       (channel_factor_ * params.GetBytesPerBuffer()) / format->nBlockAlign;
+   packet_size_bytes_ = channel_factor_ * params.GetBytesPerBuffer();
  packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate();
  DVLOG(1) << "Number of bytes per audio frame : " << frame_size_;
  DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
  DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_;
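To make the scaling above concrete, a worked example (assuming a stereo client on an 8-channel endpoint and a 480-frame client buffer; the numbers are illustrative, not from this patch):

    // channel_factor_ = endpoint_channel_count() / params.channels() = 8 / 2 = 4
    // params.GetBytesPerBuffer() = 480 frames * 2 ch * 2 bytes = 1920 bytes
    // packet_size_bytes_  = 4 * 1920       = 7680 bytes
    // nBlockAlign         = 8 ch * 2 bytes = 16 bytes per frame
    // packet_size_frames_ = 7680 / 16      = 480 frames
    // The frame count is unchanged; only the per-frame width grows.
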

  // All events are auto-reset events and non-signaled initially.

  // Create the event which the audio engine will signal each time
  // a buffer becomes ready to be processed by the client.
  audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
(...skipping 201 matching lines...)
  }
  volume_ = volume_float;
}

void WASAPIAudioOutputStream::GetVolume(double* volume) {
  DVLOG(1) << "GetVolume()";
  *volume = static_cast<double>(volume_);
}

// static
+ int WASAPIAudioOutputStream::HardwareChannelCount() {
+   // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the
+   // number of channels and the mapping of channels to speakers for
+   // multichannel devices.
+   base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex;
+   HRESULT hr = GetMixFormat(
+       eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex));
tommi (sloooow) - chröme 2012/07/31 21:39:30 the reinterpret_cast shouldn't be needed. operator ...
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Tell that to the compiler ;-) error C2664: 'media ...
+   if (FAILED(hr))
+     return 0;
+
+   // Number of channels in the stream. Corresponds to the number of bits
+   // set in the dwChannelMask.
+   DVLOG(2) << "endpoint channels: " << format_ex->Format.nChannels;
+
+   return static_cast<int>(format_ex->Format.nChannels);
+ }
+
+ // static
+ ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() {
+   return ChannelConfigToChromeChannelLayout(ChannelConfig());
+ }
+
+ // static
+ uint32 WASAPIAudioOutputStream::ChannelConfig() {
+   // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the
+   // number of channels and the mapping of channels to speakers for
+   // multichannel devices.
+   base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex;
+   HRESULT hr = GetMixFormat(
+       eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex));
tommi (sloooow) - chröme 2012/07/31 21:39:30 same here
henrika (OOO until Aug 14) 2012/08/01 16:11:09 see above
+   if (FAILED(hr))
+     return 0;
+
+   // The dwChannelMask member specifies which channels are present in the
+   // multichannel stream. The least significant bit corresponds to the
+   // front left speaker, the next least significant bit corresponds to the
+   // front right speaker, and so on.
+   // See http://msdn.microsoft.com/en-us/library/windows/desktop/dd757714(v=vs.85).aspx
+   // for more details on the channel mapping.
+   DVLOG(2) << "dwChannelMask: 0x" << std::hex << format_ex->dwChannelMask;
+
+   // See http://en.wikipedia.org/wiki/Surround_sound for more details on
+   // how to name various speaker configurations. The list below is not complete.
+   std::string speaker_config("Undefined");
tommi (sloooow) - chröme 2012/07/31 21:39:30 move this into an #ifndef NDEBUG? also, you don't ...
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Done.
+   if (format_ex->dwChannelMask == KSAUDIO_SPEAKER_MONO)
tommi (sloooow) - chröme 2012/07/31 21:39:30 switch()?
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Done.
+     speaker_config = "Mono";
+   else if (format_ex->dwChannelMask == KSAUDIO_SPEAKER_STEREO)
+     speaker_config = "Stereo";
+   else if (format_ex->dwChannelMask == KSAUDIO_SPEAKER_5POINT1_SURROUND)
+     speaker_config = "5.1 surround";
+   else if (format_ex->dwChannelMask == KSAUDIO_SPEAKER_5POINT1)
+     speaker_config = "5.1";
+   else if (format_ex->dwChannelMask == KSAUDIO_SPEAKER_7POINT1_SURROUND)
+     speaker_config = "7.1 surround";
+   else if (format_ex->dwChannelMask == KSAUDIO_SPEAKER_7POINT1)
+     speaker_config = "7.1";
+   DVLOG(2) << "speaker configuration: " << speaker_config;
+
+   return static_cast<uint32>(format_ex->dwChannelMask);
+ }
+
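As the comment above notes, nChannels should equal the number of bits set in dwChannelMask. A hypothetical cross-check helper (not part of this patch):

    // Counts the speaker-position bits in a channel mask, e.g. 6 for
    // KSAUDIO_SPEAKER_5POINT1 (0x3F).
    int CountChannelsInMask(uint32 mask) {
      int channels = 0;
      for (; mask != 0; mask &= (mask - 1))  // Clear the lowest set bit.
        ++channels;
      return channels;
    }
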
+ // static
+ ChannelLayout WASAPIAudioOutputStream::ChannelConfigToChromeChannelLayout(
+     uint32 config) {
+   switch (config) {
+     case KSAUDIO_SPEAKER_DIRECTOUT:
+       return CHANNEL_LAYOUT_NONE;
+     case KSAUDIO_SPEAKER_MONO:
+       return CHANNEL_LAYOUT_MONO;
+     case KSAUDIO_SPEAKER_STEREO:
+       return CHANNEL_LAYOUT_STEREO;
+     case KSAUDIO_SPEAKER_QUAD:
+       return CHANNEL_LAYOUT_QUAD;
+     case KSAUDIO_SPEAKER_SURROUND:
+       return CHANNEL_LAYOUT_4_0;
+     case KSAUDIO_SPEAKER_5POINT1:
+       return CHANNEL_LAYOUT_5_1_BACK;
+     case KSAUDIO_SPEAKER_5POINT1_SURROUND:
+       return CHANNEL_LAYOUT_5_1;
+     case KSAUDIO_SPEAKER_7POINT1:
+       return CHANNEL_LAYOUT_7_1_WIDE;
+     case KSAUDIO_SPEAKER_7POINT1_SURROUND:
+       return CHANNEL_LAYOUT_7_1;
+     default:
+       DVLOG(1) << "Unsupported channel layout: " << config;
tommi (sloooow) - chröme 2012/07/31 21:39:30 add break;
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Done.
+   }
+   return CHANNEL_LAYOUT_UNSUPPORTED;
+ }
+
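Usage of the mapping above is straightforward; e.g. (illustrative sketch, assuming the static helper is visible to the caller):

    ChannelLayout layout =
        WASAPIAudioOutputStream::ChannelConfigToChromeChannelLayout(
            KSAUDIO_SPEAKER_5POINT1_SURROUND);
    DCHECK_EQ(CHANNEL_LAYOUT_5_1, layout);
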
+ // static
int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) {
+   base::win::ScopedCoMem<WAVEFORMATEX> format;
+   HRESULT hr = GetMixFormat(device_role, &format);
+   if (FAILED(hr))
+     return 0;
+
+   DVLOG(2) << "nSamplesPerSec: " << format->nSamplesPerSec;
+   return static_cast<int>(format->nSamplesPerSec);
+ }
+
+ // static
+ HRESULT WASAPIAudioOutputStream::GetMixFormat(ERole device_role,
+                                               WAVEFORMATEX** device_format) {
+   // Note that we are using the IAudioClient::GetMixFormat() API to get the
+   // device format in this function. It is in fact possible to be "more native",
+   // and ask the endpoint device directly for its properties. Given a reference
+   // to the IMMDevice interface of an endpoint object, a client can obtain a
+   // reference to the endpoint object's property store by calling the
+   // IMMDevice::OpenPropertyStore() method. However, I have not been able to
+   // access any valuable information using this method on my HP Z600 desktop,
+   // hence it feels more appropriate to use the IAudioClient::GetMixFormat()
+   // approach instead.
+
  // Calling this function only makes sense for shared mode streams, since
  // if the device will be opened in exclusive mode, then the application
  // specified format is used instead. However, the result of this method can
  // be useful for testing purposes so we don't DCHECK here.
  DLOG_IF(WARNING, GetShareMode() == AUDCLNT_SHAREMODE_EXCLUSIVE) <<
      "The mixing sample rate will be ignored for exclusive-mode streams.";

  // It is assumed that this static method is called from a COM thread, i.e.,
  // CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
  ScopedComPtr<IMMDeviceEnumerator> enumerator;
(...skipping 17 matching lines...)
    // "not found" when no speaker is plugged into the output jack).
    LOG(WARNING) << "No audio end point: " << std::hex << hr;
    return 0.0;
  }

  ScopedComPtr<IAudioClient> audio_client;
  hr = endpoint_device->Activate(__uuidof(IAudioClient),
                                 CLSCTX_INPROC_SERVER,
                                 NULL,
                                 audio_client.ReceiveVoid());
-   if (FAILED(hr)) {
-     NOTREACHED() << "error code: " << std::hex << hr;
-     return 0.0;
-   }
-
-   // Retrieve the stream format that the audio engine uses for its internal
-   // processing of shared-mode streams.
-   base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
-   hr = audio_client->GetMixFormat(&audio_engine_mix_format);
-   if (FAILED(hr)) {
-     NOTREACHED() << "error code: " << std::hex << hr;
-     return 0.0;
-   }
-
-   return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
+   DCHECK(SUCCEEDED(hr)) << "Failed to activate device: " << std::hex << hr;
+   if (SUCCEEDED(hr)) {
+     hr = audio_client->GetMixFormat(device_format);
+     DCHECK(SUCCEEDED(hr)) << "GetMixFormat: " << std::hex << hr;
+   }
+
+   return hr;
}
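A sketch of calling the refactored helper above (assumes a COM-initialized thread, per the comment earlier, and that the static method is accessible from the call site):

    base::win::ScopedCoMem<WAVEFORMATEX> mix_format;
    HRESULT hr = WASAPIAudioOutputStream::GetMixFormat(eConsole, &mix_format);
    if (SUCCEEDED(hr))
      DVLOG(1) << "shared-mode mix rate: " << mix_format->nSamplesPerSec;
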

void WASAPIAudioOutputStream::Run() {
  ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);

  // Increase the thread priority.
  render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);

  // Enable MMCSS to ensure that this thread receives prioritized access to
  // CPU resources.
(...skipping 112 matching lines...)
        // a render event and the time when the first audio sample in a
        // packet is played out through the speaker. This delay value
        // can typically be utilized by an acoustic echo-control (AEC)
        // unit at the render side.
        UINT64 position = 0;
        int audio_delay_bytes = 0;
        hr = audio_clock->GetPosition(&position, NULL);
        if (SUCCEEDED(hr)) {
          // Stream position of the sample that is currently playing
          // through the speaker.
-           double pos_sample_playing_frames = format_.nSamplesPerSec *
+           double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
              (static_cast<double>(position) / device_frequency);

          // Stream position of the last sample written to the endpoint
          // buffer. Note that, the packet we are about to receive in
          // the upcoming callback is also included.
          size_t pos_last_sample_written_frames =
              num_written_frames_ + packet_size_frames_;

          // Derive the actual delay value which will be fed to the
          // render client using the OnMoreData() callback.
          audio_delay_bytes = (pos_last_sample_written_frames -
              pos_sample_playing_frames) * frame_size_;
        }

        // Read a data packet from the registered client source and
        // deliver a delay estimate in the same callback to the client.
        // A time stamp is also stored in the AudioBuffersState. This
        // time stamp can be used at the client side to compensate for
        // the delay between the usage of the delay value and the time
        // of generation.
-         uint32 num_filled_bytes = source_->OnMoreData(
-             audio_data, packet_size_bytes_,
-             AudioBuffersState(0, audio_delay_bytes));
-
-         // Perform in-place, software-volume adjustments.
-         media::AdjustVolume(audio_data,
-                             num_filled_bytes,
-                             format_.nChannels,
-                             format_.wBitsPerSample >> 3,
-                             volume_);
-
-         // Zero out the part of the packet which has not been filled by
-         // the client. Using silence is the least bad option in this
-         // situation.
-         if (num_filled_bytes < packet_size_bytes_) {
-           memset(&audio_data[num_filled_bytes], 0,
-                  (packet_size_bytes_ - num_filled_bytes));
+
+         // TODO(henrika): improve comments about possible upmixing here...
+
+         uint32 num_filled_bytes = 0;
+
+         if (channel_factor_ == 1) {
+           // Case I: no up-mixing.
+           num_filled_bytes = source_->OnMoreData(
+               audio_data, packet_size_bytes_,
+               AudioBuffersState(0, audio_delay_bytes));
+
+           // Perform in-place, software-volume adjustments.
+           media::AdjustVolume(audio_data,
+                               num_filled_bytes,
+                               format_.Format.nChannels,
+                               format_.Format.wBitsPerSample >> 3,
+                               volume_);
+
+           // Zero out the part of the packet which has not been filled by
+           // the client. Using silence is the least bad option in this
+           // situation.
+           if (num_filled_bytes < packet_size_bytes_) {
+             memset(&audio_data[num_filled_bytes], 0,
+                    (packet_size_bytes_ - num_filled_bytes));
tommi (sloooow) - chröme 2012/07/31 21:39:30 indent? (looks off by 1)
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Done.
+           }
+         } else {
+           // Case II: up-mixing.
+           const int audio_source_size_bytes =
+               packet_size_bytes_ / channel_factor_;
+           scoped_array<uint8> buffer;
+           buffer.reset(new uint8[audio_source_size_bytes]);
+
+           num_filled_bytes = source_->OnMoreData(
+               buffer.get(), audio_source_size_bytes,
+               AudioBuffersState(0, audio_delay_bytes));
+
+           ChannelUpMix(buffer.get(),
+                        &audio_data[0],
+                        client_channel_count(),
+                        endpoint_channel_count(),
+                        num_filled_bytes);
+
+           // TODO(henrika): take care of zero-out for this case as well.
        }

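One way the zero-out TODO above could be handled (a sketch only, not part of this patch; it mirrors Case I by scaling the source byte count up to output bytes):

    // num_filled_bytes counts source-side bytes, so multiply by
    // channel_factor_ to get the number of up-mixed output bytes.
    const uint32 filled_output_bytes = num_filled_bytes * channel_factor_;
    if (filled_output_bytes < packet_size_bytes_) {
      memset(&audio_data[filled_output_bytes], 0,
             packet_size_bytes_ - filled_output_bytes);
    }
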
        // Release the buffer space acquired in the GetBuffer() call.
        DWORD flags = 0;
        audio_render_client_->ReleaseBuffer(packet_size_frames_,
                                            flags);

        num_written_frames_ += packet_size_frames_;
      }
    }
(...skipping 69 matching lines...)
  // Creates and activates an IAudioClient COM object given the selected
  // render endpoint device.
  HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient),
                                          CLSCTX_INPROC_SERVER,
                                          NULL,
                                          audio_client.ReceiveVoid());
  if (SUCCEEDED(hr)) {
    // Retrieve the stream format that the audio engine uses for its internal
    // processing/mixing of shared-mode streams.
    audio_engine_mix_format_.Reset(NULL);
-     hr = audio_client->GetMixFormat(&audio_engine_mix_format_);
+     hr = audio_client->GetMixFormat(
+         reinterpret_cast<WAVEFORMATEX**>(&audio_engine_mix_format_));
tommi (sloooow) - chröme 2012/07/31 21:39:30 no cast should be necessary
henrika (OOO until Aug 14) 2012/08/01 16:11:09 see previous comment.

    if (SUCCEEDED(hr)) {
      audio_client_ = audio_client;
    }
  }

  return hr;
}

bool WASAPIAudioOutputStream::DesiredFormatIsSupported() {
  // Determine, before calling IAudioClient::Initialize(), whether the audio
  // engine supports a particular stream format.
  // In shared mode, the audio engine always supports the mix format,
  // which is stored in the |audio_engine_mix_format_| member and it is also
  // possible to receive a proposed (closest) format if the current format is
  // not supported.
-   base::win::ScopedCoMem<WAVEFORMATEX> closest_match;
-   HRESULT hr = audio_client_->IsFormatSupported(share_mode(),
-                                                 &format_,
-                                                 &closest_match);
+   base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
+   HRESULT hr = audio_client_->IsFormatSupported(
+       share_mode(), reinterpret_cast<WAVEFORMATEX*>(&format_),
tommi (sloooow) - chröme 2012/07/31 21:39:30 don't cast format_. instead use operator T*() and r ...
henrika (OOO until Aug 14) 2012/08/01 16:11:09 Same comment as before. IsFormatSupported takes WAVEFORMATEX* ...
+       reinterpret_cast<WAVEFORMATEX**>(&closest_match));
tommi (sloooow) - chröme 2012/07/31 21:39:30 cast not needed
henrika (OOO until Aug 14) 2012/08/01 16:11:09 ditto

  // This log can only be triggered for shared mode.
  DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
                                << "but a closest match exists.";
  // This log can be triggered both for shared and exclusive modes.
  DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
  if (hr == S_FALSE) {
-     DVLOG(1) << "wFormatTag    : " << closest_match->wFormatTag;
-     DVLOG(1) << "nChannels     : " << closest_match->nChannels;
-     DVLOG(1) << "nSamplesPerSec: " << closest_match->nSamplesPerSec;
-     DVLOG(1) << "wBitsPerSample: " << closest_match->wBitsPerSample;
+     DVLOG(1) << "wFormatTag    : " << closest_match->Format.wFormatTag;
+     DVLOG(1) << "nChannels     : " << closest_match->Format.nChannels;
+     DVLOG(1) << "nSamplesPerSec: " << closest_match->Format.nSamplesPerSec;
+     DVLOG(1) << "wBitsPerSample: " << closest_match->Format.wBitsPerSample;
  }

  return (hr == S_OK);
}

HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() {
#if !defined(NDEBUG)
  // The period between processing passes by the audio engine is fixed for a
  // particular audio endpoint device and represents the smallest processing
  // quantum for the audio engine. This period plus the stream latency between
(...skipping 70 matching lines...)
  return hr;
}

HRESULT WASAPIAudioOutputStream::SharedModeInitialization() {
  DCHECK_EQ(share_mode(), AUDCLNT_SHAREMODE_SHARED);

  // TODO(henrika): this buffer scheme is still under development.
  // The exact details are yet to be determined based on tests with different
  // audio clients.
  int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5);
-   if (audio_engine_mix_format_->nSamplesPerSec == 48000) {
+   if (audio_engine_mix_format_->Format.nSamplesPerSec == 48000) {
    // Initial tests have shown that we have to add 10 ms extra to
    // ensure that we don't run empty for any packet size.
    glitch_free_buffer_size_ms += 10;
-   } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) {
+   } else if (audio_engine_mix_format_->Format.nSamplesPerSec == 44100) {
    // Initial tests have shown that we have to add 20 ms extra to
    // ensure that we don't run empty for any packet size.
    glitch_free_buffer_size_ms += 20;
  } else {
    glitch_free_buffer_size_ms += 20;
  }
  DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms;
  REFERENCE_TIME requested_buffer_duration =
      static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000);

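The factor of 10000 above converts milliseconds to REFERENCE_TIME, which counts 100-nanosecond units. A hypothetical helper (not in the patch) that makes the units explicit:

    // 1 ms == 10000 * 100 ns; e.g. a 30 ms request becomes 300000 units.
    REFERENCE_TIME MillisecondsToReferenceTime(double milliseconds) {
      return static_cast<REFERENCE_TIME>(milliseconds * 10000.0 + 0.5);
    }
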
  // Initialize the audio stream between the client and the device.
  // We connect indirectly through the audio engine by using shared mode
  // and WASAPI is initialized in an event driven mode.
  // Note that this API ensures that the buffer is never smaller than the
  // minimum buffer size needed to ensure glitch-free rendering.
  // If we request a buffer size that is smaller than the audio engine's
  // minimum required buffer size, the method sets the buffer size to this
  // minimum buffer size rather than to the buffer size requested.
  HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
                                         AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
                                         AUDCLNT_STREAMFLAGS_NOPERSIST,
                                         requested_buffer_duration,
                                         0,
-                                          &format_,
+                                          reinterpret_cast<WAVEFORMATEX*>(&format_),
tommi (sloooow) - chröme 2012/07/31 21:39:30 use operator T*()
henrika (OOO until Aug 14) 2012/08/01 16:11:09 ditto
                                         NULL);
  return hr;
}

HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization() {
  DCHECK_EQ(share_mode(), AUDCLNT_SHAREMODE_EXCLUSIVE);

-   float f = (1000.0 * packet_size_frames_) / format_.nSamplesPerSec;
+   float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec;
  REFERENCE_TIME requested_buffer_duration =
      static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5);

  // Initialize the audio stream between the client and the device.
  // For an exclusive-mode stream that uses event-driven buffering, the
  // caller must specify nonzero values for hnsPeriodicity and
  // hnsBufferDuration, and the values of these two parameters must be equal.
  // The Initialize method allocates two buffers for the stream. Each buffer
  // is equal in duration to the value of the hnsBufferDuration parameter.
  // Following the Initialize call for a rendering stream, the caller should
  // fill the first of the two buffers before starting the stream.
  HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE,
                                         AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
                                         AUDCLNT_STREAMFLAGS_NOPERSIST,
                                         requested_buffer_duration,
                                         requested_buffer_duration,
-                                          &format_,
+                                          reinterpret_cast<WAVEFORMATEX*>(&format_),
tommi (sloooow) - chröme 2012/07/31 21:39:30 operator
henrika (OOO until Aug 14) 2012/08/01 16:11:09 ditto
                                         NULL);
  if (FAILED(hr)) {
    if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
      LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED";

      UINT32 aligned_buffer_size = 0;
      audio_client_->GetBufferSize(&aligned_buffer_size);
      DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;
      audio_client_.Release();

      // Calculate new aligned periodicity. Each unit of reference time
      // is 100 nanoseconds.
      REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>(
-           (10000000.0 * aligned_buffer_size / format_.nSamplesPerSec) + 0.5);
+           (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec)
+           + 0.5);

      // It is possible to re-activate and re-initialize the audio client
      // at this stage but we bail out with an error code instead and
      // combine it with a log message which informs about the suggested
      // aligned buffer size which should be used instead.
      DVLOG(1) << "aligned_buffer_duration: "
               << static_cast<double>(aligned_buffer_duration / 10000.0)
               << " [ms]";
    } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) {
      // We will get this error if we try to use a smaller buffer size than
(...skipping 19 matching lines...)
  NOTREACHED() << "IMMNotificationClient should not use this method.";
  if (iid == IID_IUnknown || iid == __uuidof(IMMNotificationClient)) {
    *object = static_cast<IMMNotificationClient*>(this);
  } else {
    return E_NOINTERFACE;
  }
  return S_OK;
}

STDMETHODIMP WASAPIAudioOutputStream::OnDeviceStateChanged(LPCWSTR device_id,
                                                           DWORD new_state) {
#ifndef NDEBUG
  std::string device_name = GetDeviceName(device_id);
  std::string device_state;

  switch (new_state) {
    case DEVICE_STATE_ACTIVE:
      device_state = "ACTIVE";
      break;
    case DEVICE_STATE_DISABLED:
      device_state = "DISABLED";
(...skipping 123 matching lines...)
  // are now re-initiated and it is now possible to re-start audio rendering.

  // Start rendering again using the new default audio endpoint.
  hr = audio_client_->Start();

  restart_rendering_mode_ = false;
  return SUCCEEDED(hr);
}

}  // namespace media