Chromium Code Reviews

Diff: media/audio/win/audio_low_latency_output_win.cc

Issue 12220076: Ensures that WASAPI audio output does not go silent in rare cases (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: nits Created 7 years, 10 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "media/audio/win/audio_low_latency_output_win.h"

 #include <Functiondiscoverykeys_devpkey.h>

 #include "base/command_line.h"
 #include "base/debug/trace_event.h"
(...skipping 432 matching lines...)
                                                wait_array,
                                                FALSE,
                                                INFINITE);

     switch (wait_result) {
       case WAIT_OBJECT_0 + 0:
         // |stop_render_event_| has been set.
         playing = false;
         break;
       case WAIT_OBJECT_0 + 1:
-        {
-          TRACE_EVENT0("audio", "WASAPIAudioOutputStream::Run");
-
-          // |audio_samples_render_event_| has been set.
-          UINT32 num_queued_frames = 0;
-          uint8* audio_data = NULL;
-
-          // Contains how much new data we can write to the buffer without
-          // the risk of overwriting previously written data that the audio
-          // engine has not yet read from the buffer.
-          size_t num_available_frames = 0;
-
-          if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
-            // Get the padding value which represents the amount of rendering
-            // data that is queued up to play in the endpoint buffer.
-            hr = audio_client_->GetCurrentPadding(&num_queued_frames);
-            num_available_frames =
-                endpoint_buffer_size_frames_ - num_queued_frames;
-          } else {
-            // While the stream is running, the system alternately sends one
-            // buffer or the other to the client. This form of double buffering
-            // is referred to as "ping-ponging". Each time the client receives
-            // a buffer from the system (triggers this event) the client must
-            // process the entire buffer. Calls to the GetCurrentPadding method
-            // are unnecessary because the packet size must always equal the
-            // buffer size. In contrast to the shared mode buffering scheme,
-            // the latency for an event-driven, exclusive-mode stream depends
-            // directly on the buffer size.
-            num_available_frames = endpoint_buffer_size_frames_;
-          }
-          if (FAILED(hr)) {
-            DLOG(ERROR) << "Failed to retrieve amount of available space: "
-                        << std::hex << hr;
-            continue;
-          }
-
-          // It can happen that we were not able to find a a perfect match
-          // between the native device rate and the endpoint buffer size.
-          // In this case, we are using a packet size which equals the enpoint
-          // buffer size (does not lead to lowest possible delay and is rare
-          // case) and must therefore wait for yet another callback until we
-          // are able to provide data.
-          if ((num_available_frames > 0) &&
-              (num_available_frames != packet_size_frames_)) {
-            continue;
-          }
-
-          // Grab all available space in the rendering endpoint buffer
-          // into which the client can write a data packet.
-          hr = audio_render_client_->GetBuffer(packet_size_frames_,
-                                               &audio_data);
-          if (FAILED(hr)) {
-            DLOG(ERROR) << "Failed to use rendering audio buffer: "
-                        << std::hex << hr;
-            continue;
-          }
-
-          // Derive the audio delay which corresponds to the delay between
-          // a render event and the time when the first audio sample in a
-          // packet is played out through the speaker. This delay value
-          // can typically be utilized by an acoustic echo-control (AEC)
-          // unit at the render side.
-          UINT64 position = 0;
-          int audio_delay_bytes = 0;
-          hr = audio_clock->GetPosition(&position, NULL);
-          if (SUCCEEDED(hr)) {
-            // Stream position of the sample that is currently playing
-            // through the speaker.
-            double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
-                (static_cast<double>(position) / device_frequency);
-
-            // Stream position of the last sample written to the endpoint
-            // buffer. Note that, the packet we are about to receive in
-            // the upcoming callback is also included.
-            size_t pos_last_sample_written_frames =
-                num_written_frames_ + packet_size_frames_;
-
-            // Derive the actual delay value which will be fed to the
-            // render client using the OnMoreData() callback.
-            audio_delay_bytes = (pos_last_sample_written_frames -
-                pos_sample_playing_frames) * format_.Format.nBlockAlign;
-          }
-
-          // Read a data packet from the registered client source and
-          // deliver a delay estimate in the same callback to the client.
-          // A time stamp is also stored in the AudioBuffersState. This
-          // time stamp can be used at the client side to compensate for
-          // the delay between the usage of the delay value and the time
-          // of generation.
-
-          uint32 num_filled_bytes = 0;
-          const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
-
-          int frames_filled = source_->OnMoreData(
-              audio_bus_.get(), AudioBuffersState(0, audio_delay_bytes));
-          num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
-          DCHECK_LE(num_filled_bytes, packet_size_bytes_);
-
-          // Note: If this ever changes to output raw float the data must be
-          // clipped and sanitized since it may come from an untrusted
-          // source such as NaCl.
-          audio_bus_->ToInterleaved(
-              frames_filled, bytes_per_sample, audio_data);
-
-          // Perform in-place, software-volume adjustments.
-          media::AdjustVolume(audio_data,
-                              num_filled_bytes,
-                              audio_bus_->channels(),
-                              bytes_per_sample,
-                              volume_);
-
-          // Zero out the part of the packet which has not been filled by
-          // the client. Using silence is the least bad option in this
-          // situation.
-          if (num_filled_bytes < packet_size_bytes_) {
-            memset(&audio_data[num_filled_bytes], 0,
-                   (packet_size_bytes_ - num_filled_bytes));
-          }
-
-          // Release the buffer space acquired in the GetBuffer() call.
-          DWORD flags = 0;
-          audio_render_client_->ReleaseBuffer(packet_size_frames_,
-                                              flags);
-
-          num_written_frames_ += packet_size_frames_;
-        }
+        // |audio_samples_render_event_| has been set.
+        RenderAudioFromSource(audio_clock, device_frequency);
         break;
       default:
         error = true;
         break;
     }
   }

   if (playing && error) {
     // Stop audio rendering since something has gone wrong in our main thread
     // loop. Note that, we are still in a "started" state, hence a Stop() call
     // is required to join the thread properly.
     audio_client_->Stop();
     PLOG(ERROR) << "WASAPI rendering failed.";
   }

   // Disable MMCSS.
   if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
     PLOG(WARNING) << "Failed to disable MMCSS";
   }
 }

+void WASAPIAudioOutputStream::RenderAudioFromSource(
+    IAudioClock* audio_clock, UINT64 device_frequency) {
+  TRACE_EVENT0("audio", "RenderAudioFromSource");
+
+  HRESULT hr = S_FALSE;
+  UINT32 num_queued_frames = 0;
+  uint8* audio_data = NULL;
+
+  // Contains how much new data we can write to the buffer without
+  // the risk of overwriting previously written data that the audio
+  // engine has not yet read from the buffer.
+  size_t num_available_frames = 0;
+
+  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+    // Get the padding value which represents the amount of rendering
+    // data that is queued up to play in the endpoint buffer.
+    hr = audio_client_->GetCurrentPadding(&num_queued_frames);
+    num_available_frames =
+        endpoint_buffer_size_frames_ - num_queued_frames;
+    if (FAILED(hr)) {
+      DLOG(ERROR) << "Failed to retrieve amount of available space: "
+                  << std::hex << hr;
+      return;
+    }
+  } else {
+    // While the stream is running, the system alternately sends one
+    // buffer or the other to the client. This form of double buffering
+    // is referred to as "ping-ponging". Each time the client receives
+    // a buffer from the system (triggers this event) the client must
+    // process the entire buffer. Calls to the GetCurrentPadding method
+    // are unnecessary because the packet size must always equal the
+    // buffer size. In contrast to the shared mode buffering scheme,
+    // the latency for an event-driven, exclusive-mode stream depends
+    // directly on the buffer size.
+    num_available_frames = endpoint_buffer_size_frames_;
+  }
+
+  // Check if there is enough available space to fit the packet size
+  // specified by the client.
+  if (num_available_frames < packet_size_frames_)
+    return;
+
+  DLOG_IF(ERROR, num_available_frames % packet_size_frames_ != 0)
+      << "Non-perfect timing detected (num_available_frames="
+      << num_available_frames << ", packet_size_frames="
+      << packet_size_frames_ << ")";
+
+  // Derive the number of packets we need to get from the client to
+  // fill up the available area in the endpoint buffer.
+  // |num_packets| will always be one for exclusive-mode streams and
+  // will be one in most cases for shared mode streams as well.
+  // However, we have found that two packets can sometimes be
+  // required.
+  size_t num_packets = (num_available_frames / packet_size_frames_);
+
+  for (size_t n = 0; n < num_packets; ++n) {
+    // Grab all available space in the rendering endpoint buffer
+    // into which the client can write a data packet.
+    hr = audio_render_client_->GetBuffer(packet_size_frames_,
+                                         &audio_data);
+    if (FAILED(hr)) {
+      DLOG(ERROR) << "Failed to use rendering audio buffer: "
+                  << std::hex << hr;
+      return;
+    }
+
+    // Derive the audio delay which corresponds to the delay between
+    // a render event and the time when the first audio sample in a
+    // packet is played out through the speaker. This delay value
+    // can typically be utilized by an acoustic echo-control (AEC)
+    // unit at the render side.
+    UINT64 position = 0;
+    int audio_delay_bytes = 0;
+    hr = audio_clock->GetPosition(&position, NULL);
+    if (SUCCEEDED(hr)) {
+      // Stream position of the sample that is currently playing
+      // through the speaker.
+      double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
+          (static_cast<double>(position) / device_frequency);
+
+      // Stream position of the last sample written to the endpoint
+      // buffer. Note that, the packet we are about to receive in
+      // the upcoming callback is also included.
+      size_t pos_last_sample_written_frames =
+          num_written_frames_ + packet_size_frames_;
+
+      // Derive the actual delay value which will be fed to the
+      // render client using the OnMoreData() callback.
+      audio_delay_bytes = (pos_last_sample_written_frames -
+          pos_sample_playing_frames) * format_.Format.nBlockAlign;
+    }
+
+    // Read a data packet from the registered client source and
+    // deliver a delay estimate in the same callback to the client.
+    // A time stamp is also stored in the AudioBuffersState. This
+    // time stamp can be used at the client side to compensate for
+    // the delay between the usage of the delay value and the time
+    // of generation.
+
+    int frames_filled = source_->OnMoreData(
+        audio_bus_.get(), AudioBuffersState(0, audio_delay_bytes));
+    uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
+    DCHECK_LE(num_filled_bytes, packet_size_bytes_);
+
+    // Note: If this ever changes to output raw float the data must be
+    // clipped and sanitized since it may come from an untrusted
+    // source such as NaCl.
+    const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
+    audio_bus_->ToInterleaved(
+        frames_filled, bytes_per_sample, audio_data);
+
+    // Perform in-place, software-volume adjustments.
+    media::AdjustVolume(audio_data,
+                        num_filled_bytes,
+                        audio_bus_->channels(),
+                        bytes_per_sample,
+                        volume_);
+
+    // Release the buffer space acquired in the GetBuffer() call.
+    // Render silence if we were not able to fill up the buffer totally.
+    DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
+        AUDCLNT_BUFFERFLAGS_SILENT : 0;
+    audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);
+
+    num_written_frames_ += packet_size_frames_;
+  }
+}
+
 void WASAPIAudioOutputStream::HandleError(HRESULT err) {
   CHECK((started() && GetCurrentThreadId() == render_thread_->tid()) ||
         (!started() && GetCurrentThreadId() == creating_thread_id_));
   NOTREACHED() << "Error code: " << std::hex << err;
   if (source_)
     source_->OnError(this, static_cast<int>(err));
 }

 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
     IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) {
(...skipping 68 matching lines...)
     DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
     return hr;
   }

   *endpoint_buffer_size = buffer_size_in_frames;
   DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
   return hr;
 }

 }  // namespace media
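
Summary of the change, for readers skimming the diff: the old inline handler rendered at most one packet per |audio_samples_render_event_| and bailed out with a continue whenever the free space in the endpoint buffer did not exactly equal one packet, which in rare shared-mode cases could leave the buffer underfed and the output silent (hence the issue title). The new RenderAudioFromSource() instead renders every whole packet that fits (num_available_frames / packet_size_frames_) and marks partially filled packets with AUDCLNT_BUFFERFLAGS_SILENT rather than memset-ing silence by hand. The snippet below is a minimal, self-contained sketch of that scheduling arithmetic only; the names PlanRenderPass and RenderPacketPlan are hypothetical and are not part of the Chromium code above.

// Hypothetical illustration only -- not part of audio_low_latency_output_win.cc.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct RenderPacketPlan {
  std::size_t num_packets;   // Whole packets to request from the source.
  int audio_delay_bytes;     // Delay estimate passed along with each packet.
};

// |available_frames|: free space in the endpoint buffer (from GetCurrentPadding).
// |packet_frames|: fixed packet size negotiated when the stream was opened.
// |written_frames|: frames handed to the device so far (num_written_frames_).
// |played_frames|: frame position currently playing (IAudioClock position
// converted to frames). |block_align|: bytes per audio frame.
RenderPacketPlan PlanRenderPass(std::size_t available_frames,
                                std::size_t packet_frames,
                                std::uint64_t written_frames,
                                double played_frames,
                                int block_align) {
  RenderPacketPlan plan = {0, 0};
  if (available_frames < packet_frames)
    return plan;  // Not enough room for a whole packet; wait for the next event.

  // The old handler effectively assumed this quotient was always one and
  // skipped the event otherwise; rendering every whole packet that fits is
  // what keeps the endpoint buffer from starving.
  plan.num_packets = available_frames / packet_frames;

  // Delay from "now" until the first sample of the upcoming packet plays:
  // (last written frame, including the upcoming packet) minus the frame
  // playing right now, converted to bytes.
  plan.audio_delay_bytes = static_cast<int>(
      (written_frames + packet_frames - played_frames) * block_align);
  return plan;
}

int main() {
  // Example: two packets' worth of space has opened up (the rare case the old
  // code skipped), 480-frame packets, 16-bit stereo (block align = 4).
  RenderPacketPlan plan = PlanRenderPass(960, 480, 48000, 47520.0, 4);
  std::printf("packets=%zu delay_bytes=%d\n",
              plan.num_packets, plan.audio_delay_bytes);
  return 0;
}

With the example numbers above the sketch prints packets=2 delay_bytes=3840, i.e. both pending packets are rendered within the same event instead of the event being dropped.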