OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
6 | 6 |
7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
8 | 8 |
| 9 #include "base/command_line.h" |
9 #include "base/logging.h" | 10 #include "base/logging.h" |
10 #include "base/memory/scoped_ptr.h" | 11 #include "base/memory/scoped_ptr.h" |
11 #include "base/utf_string_conversions.h" | 12 #include "base/utf_string_conversions.h" |
12 #include "media/audio/audio_util.h" | 13 #include "media/audio/audio_util.h" |
13 #include "media/audio/win/audio_manager_win.h" | 14 #include "media/audio/win/audio_manager_win.h" |
14 #include "media/audio/win/avrt_wrapper_win.h" | 15 #include "media/audio/win/avrt_wrapper_win.h" |
| 16 #include "media/base/media_switches.h" |
15 | 17 |
16 using base::win::ScopedComPtr; | 18 using base::win::ScopedComPtr; |
17 using base::win::ScopedCOMInitializer; | 19 using base::win::ScopedCOMInitializer; |
| 20 using base::win::ScopedCoMem; |
18 | 21 |
19 namespace media { | 22 namespace media { |
20 | 23 |
| 24 // static |
| 25 AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() { |
| 26 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); |
| 27 if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) |
| 28 return AUDCLNT_SHAREMODE_EXCLUSIVE; |
| 29 return AUDCLNT_SHAREMODE_SHARED; |
| 30 } |
| 31 |
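For reference, the selection above boils down to a small flag-to-enum mapping. A standalone sketch, where ParseShareMode() and the literal switch spelling "--enable-exclusive-audio" are illustrative assumptions rather than the Chromium API:

    // Sketch only: mirrors how GetShareMode() maps a process-wide flag to a
    // WASAPI share mode. ParseShareMode() is a hypothetical stand-in for
    // CommandLine::HasSwitch() and is not part of this CL.
    #include <audioclient.h>
    #include <cstring>

    AUDCLNT_SHAREMODE ParseShareMode(int argc, const char* const* argv) {
      for (int i = 1; i < argc; ++i) {
        if (std::strcmp(argv[i], "--enable-exclusive-audio") == 0)
          return AUDCLNT_SHAREMODE_EXCLUSIVE;
      }
      // Default is shared mode: the stream is mixed by the audio engine.
      return AUDCLNT_SHAREMODE_SHARED;
    }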
21 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, | 32 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
22 const AudioParameters& params, | 33 const AudioParameters& params, |
23 ERole device_role) | 34 ERole device_role) |
24 : com_init_(ScopedCOMInitializer::kMTA), | 35 : com_init_(ScopedCOMInitializer::kMTA), |
25 creating_thread_id_(base::PlatformThread::CurrentId()), | 36 creating_thread_id_(base::PlatformThread::CurrentId()), |
26 manager_(manager), | 37 manager_(manager), |
27 render_thread_(NULL), | 38 render_thread_(NULL), |
28 opened_(false), | 39 opened_(false), |
29 started_(false), | 40 started_(false), |
30 restart_rendering_mode_(false), | 41 restart_rendering_mode_(false), |
31 volume_(1.0), | 42 volume_(1.0), |
32 endpoint_buffer_size_frames_(0), | 43 endpoint_buffer_size_frames_(0), |
33 device_role_(device_role), | 44 device_role_(device_role), |
| 45 share_mode_(GetShareMode()), |
34 num_written_frames_(0), | 46 num_written_frames_(0), |
35 source_(NULL) { | 47 source_(NULL) { |
36 CHECK(com_init_.succeeded()); | 48 CHECK(com_init_.succeeded()); |
37 DCHECK(manager_); | 49 DCHECK(manager_); |
38 | 50 |
39 // Load the Avrt DLL if not already loaded. Required to support MMCSS. | 51 // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
40 bool avrt_init = avrt::Initialize(); | 52 bool avrt_init = avrt::Initialize(); |
41 DCHECK(avrt_init) << "Failed to load the avrt.dll"; | 53 DCHECK(avrt_init) << "Failed to load the avrt.dll"; |
42 | 54 |
| 55 if (share_mode() == AUDCLNT_SHAREMODE_EXCLUSIVE) { |
| 56 VLOG(1) << ">> Note that EXCLUSIVE MODE is enabled <<"; |
| 57 } |
| 58 |
43 // Set up the desired render format specified by the client. | 59 // Set up the desired render format specified by the client. |
44 format_.nSamplesPerSec = params.sample_rate(); | 60 format_.nSamplesPerSec = params.sample_rate(); |
45 format_.wFormatTag = WAVE_FORMAT_PCM; | 61 format_.wFormatTag = WAVE_FORMAT_PCM; |
46 format_.wBitsPerSample = params.bits_per_sample(); | 62 format_.wBitsPerSample = params.bits_per_sample(); |
47 format_.nChannels = params.channels(); | 63 format_.nChannels = params.channels(); |
48 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; | 64 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; |
49 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; | 65 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; |
50 format_.cbSize = 0; | 66 format_.cbSize = 0; |
51 | 67 |
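As a quick check of the derived fields above, a worked example with illustrative parameters (48 kHz, 16-bit, stereo; not values taken from this CL):

    // Worked instance of the WAVEFORMATEX arithmetic in the constructor.
    #include <cassert>

    int main() {
      const int sample_rate = 48000, bits_per_sample = 16, channels = 2;
      const int block_align = (bits_per_sample / 8) * channels;   // bytes/frame
      const int avg_bytes_per_sec = sample_rate * block_align;    // byte rate
      assert(block_align == 4);             // 2 bytes/sample * 2 channels
      assert(avg_bytes_per_sec == 192000);  // 48000 frames/s * 4 bytes/frame
      return 0;
    }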
52 // Size in bytes of each audio frame. | 68 // Size in bytes of each audio frame. |
(...skipping 27 matching lines...) |
80 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {} | 96 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {} |
81 | 97 |
82 bool WASAPIAudioOutputStream::Open() { | 98 bool WASAPIAudioOutputStream::Open() { |
83 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 99 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
84 if (opened_) | 100 if (opened_) |
85 return true; | 101 return true; |
86 | 102 |
87 // Create an IMMDeviceEnumerator interface and obtain a reference to | 103 // Create an IMMDeviceEnumerator interface and obtain a reference to |
88 // the IMMDevice interface of the default rendering device with the | 104 // the IMMDevice interface of the default rendering device with the |
89 // specified role. | 105 // specified role. |
90 HRESULT hr = SetRenderDevice(device_role_); | 106 HRESULT hr = SetRenderDevice(); |
91 if (FAILED(hr)) { | 107 if (FAILED(hr)) { |
92 return false; | 108 return false; |
93 } | 109 } |
94 | 110 |
95 // Obtain an IAudioClient interface which enables us to create and initialize | 111 // Obtain an IAudioClient interface which enables us to create and initialize |
96 // an audio stream between an audio application and the audio engine. | 112 // an audio stream between an audio application and the audio engine. |
97 hr = ActivateRenderDevice(); | 113 hr = ActivateRenderDevice(); |
98 if (FAILED(hr)) { | 114 if (FAILED(hr)) { |
99 return false; | 115 return false; |
100 } | 116 } |
101 | 117 |
102 // Retrieve the stream format which the audio engine uses for its internal | |
103 // processing/mixing of shared-mode streams. | |
104 hr = GetAudioEngineStreamFormat(); | |
105 if (FAILED(hr)) { | |
106 return false; | |
107 } | |
108 | |
109 // Verify that the selected audio endpoint supports the specified format | 118 // Verify that the selected audio endpoint supports the specified format |
110 // set during construction. | 119 // set during construction. |
| 120 // In exclusive mode, the client can choose to open the stream in any audio |
| 121 // format that the endpoint device supports. In shared mode, the client must |
| 122 // open the stream in the mix format that is currently in use by the audio |
| 123 // engine (or a format that is similar to the mix format). The audio engine's |
| 124 // input streams and the output mix from the engine are all in this format. |
111 if (!DesiredFormatIsSupported()) { | 125 if (!DesiredFormatIsSupported()) { |
112 return false; | 126 return false; |
113 } | 127 } |
114 | 128 |
115 // Initialize the audio stream between the client and the device using | 129 // Initialize the audio stream between the client and the device using |
116 // shared mode and a lowest possible glitch-free latency. | 130 // shared or exclusive mode and the lowest possible glitch-free latency. |
| 131 // We will enter different code paths depending on the specified share mode. |
117 hr = InitializeAudioEngine(); | 132 hr = InitializeAudioEngine(); |
118 if (FAILED(hr)) { | 133 if (FAILED(hr)) { |
119 return false; | 134 return false; |
120 } | 135 } |
121 | 136 |
122 // Register this client as an IMMNotificationClient implementation. | 137 // Register this client as an IMMNotificationClient implementation. |
123 // Only OnDefaultDeviceChanged() and OnDeviceStateChanged() and are | 138 // Only OnDefaultDeviceChanged() and OnDeviceStateChanged() and are |
124 // non-trivial. | 139 // non-trivial. |
125 hr = device_enumerator_->RegisterEndpointNotificationCallback(this); | 140 hr = device_enumerator_->RegisterEndpointNotificationCallback(this); |
126 | 141 |
(...skipping 95 matching lines...) |
222 // Flush all pending data and reset the audio clock stream position to 0. | 237 // Flush all pending data and reset the audio clock stream position to 0. |
223 hr = audio_client_->Reset(); | 238 hr = audio_client_->Reset(); |
224 if (FAILED(hr)) { | 239 if (FAILED(hr)) { |
225 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) | 240 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) |
226 << "Failed to reset streaming: " << std::hex << hr; | 241 << "Failed to reset streaming: " << std::hex << hr; |
227 } | 242 } |
228 | 243 |
229 // Extra safety check to ensure that the buffers are cleared. | 244 // Extra safety check to ensure that the buffers are cleared. |
230 // If the buffers are not cleared correctly, the next call to Start() | 245 // If the buffers are not cleared correctly, the next call to Start() |
231 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). | 246 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). |
232 UINT32 num_queued_frames = 0; | 247 // This check is only needed for shared-mode streams. |
233 audio_client_->GetCurrentPadding(&num_queued_frames); | 248 if (share_mode() == AUDCLNT_SHAREMODE_SHARED) { |
234 DCHECK_EQ(0u, num_queued_frames); | 249 UINT32 num_queued_frames = 0; |
| 250 audio_client_->GetCurrentPadding(&num_queued_frames); |
| 251 DCHECK_EQ(0u, num_queued_frames); |
| 252 } |
235 | 253 |
236 // Ensure that we don't quit the main thread loop immediately next | 254 // Ensure that we don't quit the main thread loop immediately next |
237 // time Start() is called. | 255 // time Start() is called. |
238 ResetEvent(stop_render_event_.Get()); | 256 ResetEvent(stop_render_event_.Get()); |
239 | 257 |
240 started_ = false; | 258 started_ = false; |
241 } | 259 } |
242 | 260 |
243 void WASAPIAudioOutputStream::Close() { | 261 void WASAPIAudioOutputStream::Close() { |
244 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 262 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
(...skipping 24 matching lines...) |
269 volume_ = volume_float; | 287 volume_ = volume_float; |
270 } | 288 } |
271 | 289 |
272 void WASAPIAudioOutputStream::GetVolume(double* volume) { | 290 void WASAPIAudioOutputStream::GetVolume(double* volume) { |
273 DVLOG(1) << "GetVolume()"; | 291 DVLOG(1) << "GetVolume()"; |
274 *volume = static_cast<double>(volume_); | 292 *volume = static_cast<double>(volume_); |
275 } | 293 } |
276 | 294 |
277 // static | 295 // static |
278 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { | 296 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { |
| 297 // Calling this function only makes sense for shared-mode streams, since |
| 298 // the application-specified format is used instead when the device is |
| 299 // opened in exclusive mode. However, the result of this method can |
| 300 // be useful for testing purposes, so we don't DCHECK here. |
| 301 DLOG_IF(WARNING, GetShareMode() == AUDCLNT_SHAREMODE_EXCLUSIVE) << |
| 302 "The mixing sample rate will be ignored for exclusive-mode streams."; |
| 303 |
279 // It is assumed that this static method is called from a COM thread, i.e., | 304 // It is assumed that this static method is called from a COM thread, i.e., |
280 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. | 305 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. |
281 ScopedComPtr<IMMDeviceEnumerator> enumerator; | 306 ScopedComPtr<IMMDeviceEnumerator> enumerator; |
282 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), | 307 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), |
283 NULL, | 308 NULL, |
284 CLSCTX_INPROC_SERVER, | 309 CLSCTX_INPROC_SERVER, |
285 __uuidof(IMMDeviceEnumerator), | 310 __uuidof(IMMDeviceEnumerator), |
286 enumerator.ReceiveVoid()); | 311 enumerator.ReceiveVoid()); |
287 if (FAILED(hr)) { | 312 if (FAILED(hr)) { |
288 NOTREACHED() << "error code: " << std::hex << hr; | 313 NOTREACHED() << "error code: " << std::hex << hr; |
(...skipping 15 matching lines...) |
304 ScopedComPtr<IAudioClient> audio_client; | 329 ScopedComPtr<IAudioClient> audio_client; |
305 hr = endpoint_device->Activate(__uuidof(IAudioClient), | 330 hr = endpoint_device->Activate(__uuidof(IAudioClient), |
306 CLSCTX_INPROC_SERVER, | 331 CLSCTX_INPROC_SERVER, |
307 NULL, | 332 NULL, |
308 audio_client.ReceiveVoid()); | 333 audio_client.ReceiveVoid()); |
309 if (FAILED(hr)) { | 334 if (FAILED(hr)) { |
310 NOTREACHED() << "error code: " << std::hex << hr; | 335 NOTREACHED() << "error code: " << std::hex << hr; |
311 return 0.0; | 336 return 0.0; |
312 } | 337 } |
313 | 338 |
| 339 // Retrieve the stream format that the audio engine uses for its internal |
| 340 // processing of shared-mode streams. |
314 base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format; | 341 base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format; |
315 hr = audio_client->GetMixFormat(&audio_engine_mix_format); | 342 hr = audio_client->GetMixFormat(&audio_engine_mix_format); |
316 if (FAILED(hr)) { | 343 if (FAILED(hr)) { |
317 NOTREACHED() << "error code: " << std::hex << hr; | 344 NOTREACHED() << "error code: " << std::hex << hr; |
318 return 0.0; | 345 return 0.0; |
319 } | 346 } |
320 | 347 |
321 return static_cast<int>(audio_engine_mix_format->nSamplesPerSec); | 348 return static_cast<int>(audio_engine_mix_format->nSamplesPerSec); |
322 } | 349 } |
323 | 350 |
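A hypothetical caller of this helper might look as follows; it assumes COM is already initialized on the calling thread, as the function's own comment requires, and uses the standard eConsole role for illustration:

    // Illustrative caller: queries the shared-mode mixing rate for the
    // default console render device. Not part of this CL.
    #include <mmdeviceapi.h>
    #include <iostream>

    #include "media/audio/win/audio_low_latency_output_win.h"

    void LogSharedModeMixRate() {
      int rate = media::WASAPIAudioOutputStream::HardwareSampleRate(eConsole);
      if (rate > 0)
        std::cout << "Shared-mode mixing rate: " << rate << " Hz" << std::endl;
    }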
(...skipping 63 matching lines...) |
387 playing = false; | 414 playing = false; |
388 error = true; | 415 error = true; |
389 } | 416 } |
390 break; | 417 break; |
391 case WAIT_OBJECT_0 + 2: | 418 case WAIT_OBJECT_0 + 2: |
392 { | 419 { |
393 // |audio_samples_render_event_| has been set. | 420 // |audio_samples_render_event_| has been set. |
394 UINT32 num_queued_frames = 0; | 421 UINT32 num_queued_frames = 0; |
395 uint8* audio_data = NULL; | 422 uint8* audio_data = NULL; |
396 | 423 |
397 // Get the padding value which represents the amount of rendering | 424 // Contains how much new data we can write to the buffer without |
398 // data that is queued up to play in the endpoint buffer. | |
399 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | |
400 | |
401 // Determine how much new data we can write to the buffer without | |
402 // the risk of overwriting previously written data that the audio | 425 // the risk of overwriting previously written data that the audio |
403 // engine has not yet read from the buffer. | 426 // engine has not yet read from the buffer. |
404 size_t num_available_frames = | 427 size_t num_available_frames = 0; |
405 endpoint_buffer_size_frames_ - num_queued_frames; | 428 |
| 429 if (share_mode() == AUDCLNT_SHAREMODE_SHARED) { |
| 430 // Get the padding value which represents the amount of rendering |
| 431 // data that is queued up to play in the endpoint buffer. |
| 432 hr = audio_client_->GetCurrentPadding(&num_queued_frames); |
| 433 num_available_frames = |
| 434 endpoint_buffer_size_frames_ - num_queued_frames; |
| 435 } else { |
| 436 // While the stream is running, the system alternately sends one |
| 437 // buffer or the other to the client. This form of double buffering |
| 438 // is referred to as "ping-ponging". Each time the client receives |
| 439 // a buffer from the system (which triggers this event), the client must |
| 440 // process the entire buffer. Calls to the GetCurrentPadding method |
| 441 // are unnecessary because the packet size must always equal the |
| 442 // buffer size. In contrast to the shared mode buffering scheme, |
| 443 // the latency for an event-driven, exclusive-mode stream depends |
| 444 // directly on the buffer size. |
| 445 num_available_frames = endpoint_buffer_size_frames_; |
| 446 } |
406 | 447 |
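To make the latency remark above concrete, a back-of-envelope sketch with illustrative numbers (a hypothetical 10 ms, 480-frame packet at 48 kHz; real sizes come from AudioParameters):

    #include <cstdio>

    int main() {
      const int sample_rate = 48000;
      const int packet_size_frames = 480;  // hypothetical 10 ms packet
      const double buffer_ms = 1000.0 * packet_size_frames / sample_rate;
      // Exclusive mode ping-pongs two equal-sized buffers, so the endpoint
      // buffering alone contributes roughly two buffer durations of latency.
      std::printf("one buffer: %.1f ms, double-buffered: %.1f ms\n",
                  buffer_ms, 2.0 * buffer_ms);
      return 0;
    }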
407 // Check if there is enough available space to fit the packet size | 448 // Check if there is enough available space to fit the packet size |
408 // specified by the client. | 449 // specified by the client. |
409 if (FAILED(hr) || (num_available_frames < packet_size_frames_)) | 450 if (FAILED(hr) || (num_available_frames < packet_size_frames_)) |
410 continue; | 451 continue; |
411 | 452 |
412 // Derive the number of packets we need to get from the client to | 453 // Derive the number of packets we need to get from the client to |
413 // fill up the available area in the endpoint buffer. | 454 // fill up the available area in the endpoint buffer. |
| 455 // |num_packets| will always be one for exclusive-mode streams. |
414 size_t num_packets = (num_available_frames / packet_size_frames_); | 456 size_t num_packets = (num_available_frames / packet_size_frames_); |
415 | 457 |
416 // Get data from the client/source. | 458 // Get data from the client/source. |
417 for (size_t n = 0; n < num_packets; ++n) { | 459 for (size_t n = 0; n < num_packets; ++n) { |
418 // Grab all available space in the rendering endpoint buffer | 460 // Grab all available space in the rendering endpoint buffer |
419 // into which the client can write a data packet. | 461 // into which the client can write a data packet. |
420 hr = audio_render_client_->GetBuffer(packet_size_frames_, | 462 hr = audio_render_client_->GetBuffer(packet_size_frames_, |
421 &audio_data); | 463 &audio_data); |
422 if (FAILED(hr)) { | 464 if (FAILED(hr)) { |
423 DLOG(ERROR) << "Failed to use rendering audio buffer: " | 465 DLOG(ERROR) << "Failed to use rendering audio buffer: " |
(...skipping 80 matching lines...) |
504 PLOG(WARNING) << "Failed to disable MMCSS"; | 546 PLOG(WARNING) << "Failed to disable MMCSS"; |
505 } | 547 } |
506 } | 548 } |
507 | 549 |
508 void WASAPIAudioOutputStream::HandleError(HRESULT err) { | 550 void WASAPIAudioOutputStream::HandleError(HRESULT err) { |
509 NOTREACHED() << "Error code: " << std::hex << err; | 551 NOTREACHED() << "Error code: " << std::hex << err; |
510 if (source_) | 552 if (source_) |
511 source_->OnError(this, static_cast<int>(err)); | 553 source_->OnError(this, static_cast<int>(err)); |
512 } | 554 } |
513 | 555 |
514 HRESULT WASAPIAudioOutputStream::SetRenderDevice(ERole device_role) { | 556 HRESULT WASAPIAudioOutputStream::SetRenderDevice() { |
| 557 ScopedComPtr<IMMDeviceEnumerator> device_enumerator; |
| 558 ScopedComPtr<IMMDevice> endpoint_device; |
| 559 |
515 // Create the IMMDeviceEnumerator interface. | 560 // Create the IMMDeviceEnumerator interface. |
516 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), | 561 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), |
517 NULL, | 562 NULL, |
518 CLSCTX_INPROC_SERVER, | 563 CLSCTX_INPROC_SERVER, |
519 __uuidof(IMMDeviceEnumerator), | 564 __uuidof(IMMDeviceEnumerator), |
520 device_enumerator_.ReceiveVoid()); | 565 device_enumerator.ReceiveVoid()); |
521 if (SUCCEEDED(hr)) { | 566 if (SUCCEEDED(hr)) { |
522 // Retrieve the default render audio endpoint for the specified role. | 567 // Retrieve the default render audio endpoint for the specified role. |
523 // Note that, in Windows Vista, the MMDevice API supports device roles | 568 // Note that, in Windows Vista, the MMDevice API supports device roles |
524 // but the system-supplied user interface programs do not. | 569 // but the system-supplied user interface programs do not. |
525 hr = device_enumerator_->GetDefaultAudioEndpoint( | 570 hr = device_enumerator->GetDefaultAudioEndpoint( |
526 eRender, device_role, endpoint_device_.Receive()); | 571 eRender, device_role_, endpoint_device.Receive()); |
527 if (FAILED(hr)) | 572 if (FAILED(hr)) |
528 return hr; | 573 return hr; |
529 | 574 |
530 // Verify that the audio endpoint device is active. That is, the audio | 575 // Verify that the audio endpoint device is active. That is, the audio |
531 // adapter that connects to the endpoint device is present and enabled. | 576 // adapter that connects to the endpoint device is present and enabled. |
532 DWORD state = DEVICE_STATE_DISABLED; | 577 DWORD state = DEVICE_STATE_DISABLED; |
533 hr = endpoint_device_->GetState(&state); | 578 hr = endpoint_device->GetState(&state); |
534 if (SUCCEEDED(hr)) { | 579 if (SUCCEEDED(hr)) { |
535 if (!(state & DEVICE_STATE_ACTIVE)) { | 580 if (!(state & DEVICE_STATE_ACTIVE)) { |
536 DLOG(ERROR) << "Selected render device is not active."; | 581 DLOG(ERROR) << "Selected render device is not active."; |
537 hr = E_ACCESSDENIED; | 582 hr = E_ACCESSDENIED; |
538 } | 583 } |
539 } | 584 } |
540 } | 585 } |
541 | 586 |
| 587 if (SUCCEEDED(hr)) { |
| 588 device_enumerator_ = device_enumerator; |
| 589 endpoint_device_ = endpoint_device; |
| 590 } |
| 591 |
| 592 return hr; |
| 593 } |
| 594 |
| 595 HRESULT WASAPIAudioOutputStream::ActivateRenderDevice() { |
| 596 ScopedComPtr<IAudioClient> audio_client; |
| 597 |
| 598 // Creates and activates an IAudioClient COM object given the selected |
| 599 // render endpoint device. |
| 600 HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient), |
| 601 CLSCTX_INPROC_SERVER, |
| 602 NULL, |
| 603 audio_client.ReceiveVoid()); |
| 604 if (SUCCEEDED(hr)) { |
| 605 // Retrieve the stream format that the audio engine uses for its internal |
| 606 // processing/mixing of shared-mode streams. |
| 607 audio_engine_mix_format_.Reset(NULL); |
| 608 hr = audio_client->GetMixFormat(&audio_engine_mix_format_); |
| 609 |
| 610 if (SUCCEEDED(hr)) { |
| 611 audio_client_ = audio_client; |
| 612 } |
| 613 } |
| 614 |
542 return hr; | 615 return hr; |
543 } | 616 } |
544 | 617 |
545 HRESULT WASAPIAudioOutputStream::ActivateRenderDevice() { | |
546 // Creates and activates an IAudioClient COM object given the selected | |
547 // render endpoint device. | |
548 HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient), | |
549 CLSCTX_INPROC_SERVER, | |
550 NULL, | |
551 audio_client_.ReceiveVoid()); | |
552 return hr; | |
553 } | |
554 | |
555 HRESULT WASAPIAudioOutputStream::GetAudioEngineStreamFormat() { | |
556 // Retrieve the stream format that the audio engine uses for its internal | |
557 // processing/mixing of shared-mode streams. | |
558 return audio_client_->GetMixFormat(&audio_engine_mix_format_); | |
559 } | |
560 | |
561 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() { | 618 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() { |
| 619 // Determine, before calling IAudioClient::Initialize(), whether the audio |
| 620 // engine supports a particular stream format. |
562 // In shared mode, the audio engine always supports the mix format, | 621 // In shared mode, the audio engine always supports the mix format, |
563 // which is stored in the |audio_engine_mix_format_| member. In addition, | 622 // which is stored in the |audio_engine_mix_format_| member and it is also |
564 // the audio engine *might* support similar formats that have the same | 623 // possible to receive a proposed (closest) format if the current format is |
565 // sample rate and number of channels as the mix format but differ in | 624 // not supported. |
566 // the representation of audio sample values. | |
567 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; | 625 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; |
568 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, | 626 HRESULT hr = audio_client_->IsFormatSupported(share_mode(), |
569 &format_, | 627 &format_, |
570 &closest_match); | 628 &closest_match); |
| 629 |
| 630 // This log can only be triggered for shared mode. |
571 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " | 631 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " |
572 << "but a closest match exists."; | 632 << "but a closest match exists."; |
| 633 // This log can be triggered both for shared and exclusive modes. |
| 634 DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format."; |
| 635 if (hr == S_FALSE) { |
| 636 DVLOG(1) << "wFormatTag : " << closest_match->wFormatTag; |
| 637 DVLOG(1) << "nChannels : " << closest_match->nChannels; |
| 638 DVLOG(1) << "nSamplesPerSec: " << closest_match->nSamplesPerSec; |
| 639 DVLOG(1) << "wBitsPerSample: " << closest_match->wBitsPerSample; |
| 640 } |
| 641 |
573 return (hr == S_OK); | 642 return (hr == S_OK); |
574 } | 643 } |
575 | 644 |
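For reference, the IsFormatSupported() contract relied on above (S_OK is an exact match; S_FALSE, shared mode only, hands back a closest match the caller must free; AUDCLNT_E_UNSUPPORTED_FORMAT is a hard failure) can be exercised standalone. This sketch assumes a caller-supplied |client| and |wanted| format and uses raw CoTaskMemFree in place of ScopedCoMem:

    #include <audioclient.h>
    #include <objbase.h>

    bool ProbeFormat(IAudioClient* client, AUDCLNT_SHAREMODE mode,
                     const WAVEFORMATEX* wanted) {
      WAVEFORMATEX* closest = NULL;
      HRESULT hr = client->IsFormatSupported(mode, wanted, &closest);
      if (closest)
        CoTaskMemFree(closest);  // Raw-COM equivalent of ScopedCoMem cleanup.
      return hr == S_OK;
    }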
576 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() { | 645 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() { |
| 646 #if !defined(NDEBUG) |
| 647 // The period between processing passes by the audio engine is fixed for a |
| 648 // particular audio endpoint device and represents the smallest processing |
| 649 // quantum for the audio engine. This period plus the stream latency between |
| 650 // the buffer and endpoint device represents the minimum possible latency |
| 651 // that an audio application can achieve in shared mode. |
| 652 { |
| 653 REFERENCE_TIME default_device_period = 0; |
| 654 REFERENCE_TIME minimum_device_period = 0; |
| 655 HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period, |
| 656 &minimum_device_period); |
| 657 if (SUCCEEDED(hr_dbg)) { |
| 658 // Shared mode device period. |
| 659 DVLOG(1) << "shared mode (default) device period: " |
| 660 << static_cast<double>(default_device_period / 10000.0) |
| 661 << " [ms]"; |
| 662 // Exclusive mode device period. |
| 663 DVLOG(1) << "exclusive mode (minimum) device period: " |
| 664 << static_cast<double>(minimum_device_period / 10000.0) |
| 665 << " [ms]"; |
| 666 } |
| 667 |
| 668 REFERENCE_TIME latency = 0; |
| 669 hr_dbg = audio_client_->GetStreamLatency(&latency); |
| 670 if (SUCCEEDED(hr_dbg)) { |
| 671 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) |
| 672 << " [ms]"; |
| 673 } |
| 674 } |
| 675 #endif |
| 676 |
| 677 HRESULT hr = S_FALSE; |
| 678 |
| 679 // Perform different initialization depending on whether the device is |
| 680 // to be opened in shared mode or in exclusive mode. |
| 681 hr = (share_mode() == AUDCLNT_SHAREMODE_SHARED) ? |
| 682 SharedModeInitialization() : ExclusiveModeInitialization(); |
| 683 if (FAILED(hr)) { |
| 684 LOG(WARNING) << "IAudioClient::Initialize() failed: " << std::hex << hr; |
| 685 return hr; |
| 686 } |
| 687 |
| 688 // Retrieve the length of the endpoint buffer. The buffer length represents |
| 689 // the maximum amount of rendering data that the client can write to |
| 690 // the endpoint buffer during a single processing pass. |
| 691 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. |
| 692 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); |
| 693 if (FAILED(hr)) |
| 694 return hr; |
| 695 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ |
| 696 << " [frames]"; |
| 697 |
| 698 // The buffer scheme for exclusive mode streams is not designed for max |
| 699 // flexibility. We only allow a "perfect match" between the packet size set |
| 700 // by the user and the actual endpoint buffer size. |
| 701 if (share_mode() == AUDCLNT_SHAREMODE_EXCLUSIVE && |
| 702 endpoint_buffer_size_frames_ != packet_size_frames_) { |
| 703 hr = AUDCLNT_E_INVALID_SIZE; |
| 704 DLOG(ERROR) << "AUDCLNT_E_INVALID_SIZE"; |
| 705 return hr; |
| 706 } |
| 707 |
| 708 // Set the event handle that the audio engine will signal each time |
| 709 // a buffer becomes ready to be processed by the client. |
| 710 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get()); |
| 711 if (FAILED(hr)) |
| 712 return hr; |
| 713 |
| 714 // Get access to the IAudioRenderClient interface. This interface |
| 715 // enables us to write output data to a rendering endpoint buffer. |
| 716 // The methods in this interface manage the movement of data packets |
| 717 // that contain audio-rendering data. |
| 718 hr = audio_client_->GetService(__uuidof(IAudioRenderClient), |
| 719 audio_render_client_.ReceiveVoid()); |
| 720 return hr; |
| 721 } |
| 722 |
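The repeated factor of 10000 in this file reflects REFERENCE_TIME's 100-nanosecond unit; two illustrative helpers (not part of this CL) spell out the conversion:

    #include <audioclient.h>

    // 1 ms = 10,000 REFERENCE_TIME units (each unit is 100 ns).
    inline REFERENCE_TIME MillisecondsToReferenceTime(double ms) {
      return static_cast<REFERENCE_TIME>(ms * 10000.0 + 0.5);
    }

    inline double ReferenceTimeToMilliseconds(REFERENCE_TIME rt) {
      return rt / 10000.0;
    }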
| 723 HRESULT WASAPIAudioOutputStream::SharedModeInitialization() { |
| 724 DCHECK_EQ(share_mode(), AUDCLNT_SHAREMODE_SHARED); |
| 725 |
577 // TODO(henrika): this buffer scheme is still under development. | 726 // TODO(henrika): this buffer scheme is still under development. |
578 // The exact details are yet to be determined based on tests with different | 727 // The exact details are yet to be determined based on tests with different |
579 // audio clients. | 728 // audio clients. |
580 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5); | 729 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5); |
581 if (audio_engine_mix_format_->nSamplesPerSec == 48000) { | 730 if (audio_engine_mix_format_->nSamplesPerSec == 48000) { |
582 // Initial tests have shown that we have to add 10 ms extra to | 731 // Initial tests have shown that we have to add 10 ms extra to |
583 // ensure that we don't run empty for any packet size. | 732 // ensure that we don't run empty for any packet size. |
584 glitch_free_buffer_size_ms += 10; | 733 glitch_free_buffer_size_ms += 10; |
585 } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) { | 734 } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) { |
586 // Initial tests have shown that we have to add 20 ms extra to | 735 // Initial tests have shown that we have to add 20 ms extra to |
587 // ensure that we don't run empty for any packet size. | 736 // ensure that we don't run empty for any packet size. |
588 glitch_free_buffer_size_ms += 20; | 737 glitch_free_buffer_size_ms += 20; |
589 } else { | 738 } else { |
590 glitch_free_buffer_size_ms += 20; | 739 glitch_free_buffer_size_ms += 20; |
591 } | 740 } |
592 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms; | 741 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms; |
593 REFERENCE_TIME requested_buffer_duration_hns = | 742 REFERENCE_TIME requested_buffer_duration = |
594 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000); | 743 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000); |
595 | 744 |
596 // Initialize the audio stream between the client and the device. | 745 // Initialize the audio stream between the client and the device. |
597 // We connect indirectly through the audio engine by using shared mode | 746 // We connect indirectly through the audio engine by using shared mode |
598 // and WASAPI is initialized in an event driven mode. | 747 // and WASAPI is initialized in an event driven mode. |
599 // Note that this API ensures that the buffer is never smaller than the | 748 // Note that this API ensures that the buffer is never smaller than the |
600 // minimum buffer size needed to ensure glitch-free rendering. | 749 // minimum buffer size needed to ensure glitch-free rendering. |
601 // If we request a buffer size that is smaller than the audio engine's | 750 // If we request a buffer size that is smaller than the audio engine's |
602 // minimum required buffer size, the method sets the buffer size to this | 751 // minimum required buffer size, the method sets the buffer size to this |
603 // minimum buffer size rather than to the buffer size requested. | 752 // minimum buffer size rather than to the buffer size requested. |
604 HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, | 753 HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, |
605 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | 754 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | |
606 AUDCLNT_STREAMFLAGS_NOPERSIST, | 755 AUDCLNT_STREAMFLAGS_NOPERSIST, |
607 requested_buffer_duration_hns, | 756 requested_buffer_duration, |
608 0, | 757 0, |
609 &format_, | 758 &format_, |
610 NULL); | 759 NULL); |
611 if (FAILED(hr)) | |
612 return hr; | |
613 | |
614 // Retrieve the length of the endpoint buffer shared between the client | |
615 // and the audio engine. The buffer length determines | |
616 // the maximum amount of rendering data that the client can write to | |
617 // the endpoint buffer during a single processing pass. | |
618 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. | |
619 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); | |
620 if (FAILED(hr)) | |
621 return hr; | |
622 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ | |
623 << " [frames]"; | |
624 #ifndef NDEBUG | |
625 // The period between processing passes by the audio engine is fixed for a | |
626 // particular audio endpoint device and represents the smallest processing | |
627 // quantum for the audio engine. This period plus the stream latency between | |
628 // the buffer and endpoint device represents the minimum possible latency | |
629 // that an audio application can achieve in shared mode. | |
630 REFERENCE_TIME default_device_period = 0; | |
631 REFERENCE_TIME minimum_device_period = 0; | |
632 HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period, | |
633 &minimum_device_period); | |
634 if (SUCCEEDED(hr_dbg)) { | |
635 // Shared mode device period. | |
636 DVLOG(1) << "default device period: " | |
637 << static_cast<double>(default_device_period / 10000.0) | |
638 << " [ms]"; | |
639 // Exclusive mode device period. | |
640 DVLOG(1) << "minimum device period: " | |
641 << static_cast<double>(minimum_device_period / 10000.0) | |
642 << " [ms]"; | |
643 } | |
644 | |
645 REFERENCE_TIME latency = 0; | |
646 hr_dbg = audio_client_->GetStreamLatency(&latency); | |
647 if (SUCCEEDED(hr_dbg)) { | |
648 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) | |
649 << " [ms]"; | |
650 } | |
651 #endif | |
652 | |
653 // Set the event handle that the audio engine will signal each time | |
654 // a buffer becomes ready to be processed by the client. | |
655 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get()); | |
656 if (FAILED(hr)) | |
657 return hr; | |
658 | |
659 // Get access to the IAudioRenderClient interface. This interface | |
660 // enables us to write output data to a rendering endpoint buffer. | |
661 // The methods in this interface manage the movement of data packets | |
662 // that contain audio-rendering data. | |
663 hr = audio_client_->GetService(__uuidof(IAudioRenderClient), | |
664 audio_render_client_.ReceiveVoid()); | |
665 return hr; | 760 return hr; |
666 } | 761 } |
667 | 762 |
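A worked instance of the heuristic above, under the assumption of a hypothetical 10 ms packet on a 48 kHz engine (Initialize() may still round the request up to the engine's minimum glitch-free size):

    #include <cassert>

    int main() {
      const int packet_size_ms = 10;                   // hypothetical packet
      const int glitch_free_ms = packet_size_ms + 10;  // 48 kHz branch above
      const long long duration_hns = glitch_free_ms * 10000LL;  // 100 ns units
      assert(duration_hns == 200000);                  // 20 ms requested
      return 0;
    }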
| 763 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization() { |
| 764 DCHECK_EQ(share_mode(), AUDCLNT_SHAREMODE_EXCLUSIVE); |
| 765 |
| 766 float f = (1000.0 * packet_size_frames_) / format_.nSamplesPerSec; |
| 767 REFERENCE_TIME requested_buffer_duration = |
| 768 static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5); |
| 769 |
| 770 // Initialize the audio stream between the client and the device. |
| 771 // For an exclusive-mode stream that uses event-driven buffering, the |
| 772 // caller must specify nonzero values for hnsPeriodicity and |
| 773 // hnsBufferDuration, and the values of these two parameters must be equal. |
| 774 // The Initialize method allocates two buffers for the stream. Each buffer |
| 775 // is equal in duration to the value of the hnsBufferDuration parameter. |
| 776 // Following the Initialize call for a rendering stream, the caller should |
| 777 // fill the first of the two buffers before starting the stream. |
| 778 HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, |
| 779 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | |
| 780 AUDCLNT_STREAMFLAGS_NOPERSIST, |
| 781 requested_buffer_duration, |
| 782 requested_buffer_duration, |
| 783 &format_, |
| 784 NULL); |
| 785 if (FAILED(hr)) { |
| 786 if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) { |
| 787 LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED"; |
| 788 |
| 789 UINT32 aligned_buffer_size = 0; |
| 790 audio_client_->GetBufferSize(&aligned_buffer_size); |
| 791 DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size; |
| 792 audio_client_.Release(); |
| 793 |
| 794 // Calculate new aligned periodicity. Each unit of reference time |
| 795 // is 100 nanoseconds. |
| 796 REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>( |
| 797 (10000000.0 * aligned_buffer_size / format_.nSamplesPerSec) + 0.5); |
| 798 |
| 799 // It is possible to re-activate and re-initialize the audio client |
| 800 // at this stage but we bail out with an error code instead and |
| 801 // combine it with a log message that reports the suggested |
| 802 // aligned buffer size to use. |
| 803 DVLOG(1) << "aligned_buffer_duration: " |
| 804 << static_cast<double>(aligned_buffer_duration / 10000.0) |
| 805 << " [ms]"; |
| 806 } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) { |
| 807 // We will get this error if we try to use a smaller buffer size than |
| 808 // the minimum supported size (usually ~3ms on Windows 7). |
| 809 LOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD"; |
| 810 } |
| 811 } |
| 812 |
| 813 return hr; |
| 814 } |
| 815 |
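To illustrate the alignment fallback, a worked sketch with assumed numbers (a 441-frame, 10 ms request at 44.1 kHz rejected with AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED, and a hypothetical aligned size of 448 frames; real hardware varies):

    #include <cstdio>

    int main() {
      const int sample_rate = 44100;
      const unsigned aligned_buffer_size = 448;  // hypothetical driver value
      // Same formula as ExclusiveModeInitialization(): frames converted to
      // 100 ns units, rounded to nearest.
      const long long aligned_buffer_duration = static_cast<long long>(
          (10000000.0 * aligned_buffer_size / sample_rate) + 0.5);
      std::printf("aligned duration: %.2f ms\n",
                  aligned_buffer_duration / 10000.0);  // ~10.16 ms
      return 0;
    }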
668 ULONG WASAPIAudioOutputStream::AddRef() { | 816 ULONG WASAPIAudioOutputStream::AddRef() { |
669 NOTREACHED() << "IMMNotificationClient should not use this method."; | 817 NOTREACHED() << "IMMNotificationClient should not use this method."; |
670 return 1; | 818 return 1; |
671 } | 819 } |
672 | 820 |
673 ULONG WASAPIAudioOutputStream::Release() { | 821 ULONG WASAPIAudioOutputStream::Release() { |
674 NOTREACHED() << "IMMNotificationClient should not use this method."; | 822 NOTREACHED() << "IMMNotificationClient should not use this method."; |
675 return 1; | 823 return 1; |
676 } | 824 } |
677 | 825 |
(...skipping 145 matching lines...) |
823 // are now re-initiated and it is possible to restart audio rendering. | 971 // are now re-initiated and it is possible to restart audio rendering. |
824 | 972 |
825 // Start rendering again using the new default audio endpoint. | 973 // Start rendering again using the new default audio endpoint. |
826 hr = audio_client_->Start(); | 974 hr = audio_client_->Start(); |
827 | 975 |
828 restart_rendering_mode_ = false; | 976 restart_rendering_mode_ = false; |
829 return SUCCEEDED(hr); | 977 return SUCCEEDED(hr); |
830 } | 978 } |
831 | 979 |
832 } // namespace media | 980 } // namespace media |