OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 559 matching lines...)
570 { | 570 { |
571 // According to MSE specification (https://w3c.github.io/media-source/#sourcebuffer-init-segment-received) step 3.1: | 571 // According to MSE specification (https://w3c.github.io/media-source/#sourcebuffer-init-segment-received) step 3.1: |
572 // > If more than one track for a single type are present (ie 2 audio tracks), then the Track IDs match the ones in the first initialization segment. | 572 // > If more than one track for a single type are present (ie 2 audio tracks), then the Track IDs match the ones in the first initialization segment. |
573 // I.e. we only need to search by TrackID if there is more than one track, otherwise we can assume that the only | 573 // I.e. we only need to search by TrackID if there is more than one track, otherwise we can assume that the only |
574 // track of the given type is the same one that we had in previous init segments. | 574 // track of the given type is the same one that we had in previous init segments. |
575 if (trackList.length() == 1) | 575 if (trackList.length() == 1) |
576 return trackList.anonymousIndexedGetter(0); | 576 return trackList.anonymousIndexedGetter(0); |
577 return trackList.getTrackById(id); | 577 return trackList.getTrackById(id); |
578 } | 578 } |
579 | 579 |
580 WebVector<WebMediaPlayer::TrackId> SourceBuffer::initializationSegmentReceived(const WebVector<MediaTrackInfo>& newTracks) | 580 const TrackDefault* SourceBuffer::getTrackDefault(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const |
| 581 { |
| 582 // This is a helper for implementation of default track label and default track language algorithms. |
| 583 // defaultTrackLabel spec: https://w3c.github.io/media-source/#sourcebuffer-default-track-label |
| 584 // defaultTrackLanguage spec: https://w3c.github.io/media-source/#sourcebuffer-default-track-language |
| 585 |
| 586 // 1. If trackDefaults contains a TrackDefault object with a type attribute equal to type and a byteStreamTrackID attribute equal to byteStreamTrackID, |
| 587 // then return the value of the label/language attribute on this matching object and abort these steps. |
| 588 // 2. If trackDefaults contains a TrackDefault object with a type attribute equal to type and a byteStreamTrackID attribute equal to an empty string, |
| 589 // then return the value of the label/language attribute on this matching object and abort these steps. |
| 590 // 3. Return an empty string to the caller |
| 591 const TrackDefault* trackDefaultWithEmptyBytestreamId = nullptr; |
| 592 for (unsigned i = 0; i < m_trackDefaults->length(); ++i) { |
| 593 const TrackDefault* trackDefault = m_trackDefaults->item(i); |
| 594 if (trackDefault->type() != trackType) |
| 595 continue; |
| 596 if (trackDefault->byteStreamTrackID() == byteStreamTrackID) |
| 597 return trackDefault; |
| 598 if (!trackDefaultWithEmptyBytestreamId && trackDefault->byteStreamTrackID() == "") |
| 599 trackDefaultWithEmptyBytestreamId = trackDefault; |
| 600 } |
| 601 return trackDefaultWithEmptyBytestreamId; |
| 602 } |
| 603 |
| 604 AtomicString SourceBuffer::defaultTrackLabel(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const |
| 605 { |
| 606 // Spec: https://w3c.github.io/media-source/#sourcebuffer-default-track-label |
| 607 const TrackDefault* trackDefault = getTrackDefault(trackType, byteStreamTrackID); |
| 608 return trackDefault ? AtomicString(trackDefault->label()) : ""; |
| 609 } |
| 610 |
| 611 AtomicString SourceBuffer::defaultTrackLanguage(const AtomicString& trackType, const AtomicString& byteStreamTrackID) const |
| 612 { |
| 613 // Spec: https://w3c.github.io/media-source/#sourcebuffer-default-track-language |
| 614 const TrackDefault* trackDefault = getTrackDefault(trackType, byteStreamTrackID); |
| 615 return trackDefault ? AtomicString(trackDefault->language()) : ""; |
| 616 } |
| 617 |
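
A note on the lookup order implemented by the three helpers above: getTrackDefault() prefers a TrackDefault whose byteStreamTrackID exactly matches the track being processed, remembers only the first entry of the right type with an empty byteStreamTrackID as a fallback, and returns null when neither exists, in which case defaultTrackLabel()/defaultTrackLanguage() hand back an empty string. A minimal standalone sketch of that precedence, using plain std::string/std::vector stand-ins rather than Blink types (FakeTrackDefault and findTrackDefault are illustrative names only):

    #include <cassert>
    #include <string>
    #include <vector>

    // Simplified stand-in for a TrackDefault entry.
    struct FakeTrackDefault {
        std::string type;               // "audio" or "video"
        std::string byteStreamTrackID;  // "" acts as a wildcard for this type
        std::string label;
        std::string language;
    };

    // Mirrors the precedence sketched above: an exact byteStreamTrackID match
    // wins immediately; otherwise the first empty-ID entry of the same type is
    // used; otherwise there is no default at all.
    const FakeTrackDefault* findTrackDefault(const std::vector<FakeTrackDefault>& defaults,
                                             const std::string& type,
                                             const std::string& byteStreamTrackID)
    {
        const FakeTrackDefault* emptyIdMatch = nullptr;
        for (const FakeTrackDefault& d : defaults) {
            if (d.type != type)
                continue;
            if (d.byteStreamTrackID == byteStreamTrackID)
                return &d;
            if (!emptyIdMatch && d.byteStreamTrackID.empty())
                emptyIdMatch = &d;
        }
        return emptyIdMatch;
    }

    int main()
    {
        std::vector<FakeTrackDefault> defaults = {
            { "audio", "",  "Default audio", "en" },
            { "audio", "2", "Commentary",    "en" },
        };
        assert(findTrackDefault(defaults, "audio", "2")->label == "Commentary");     // exact ID wins
        assert(findTrackDefault(defaults, "audio", "1")->label == "Default audio");  // falls back to the empty-ID entry
        assert(findTrackDefault(defaults, "video", "1") == nullptr);                 // no default -> empty label/language
        return 0;
    }
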
| 618 bool SourceBuffer::initializationSegmentReceived(const WebVector<MediaTrackInfo>& newTracks) |
581 { | 619 { |
582 SBLOG << __FUNCTION__ << " this=" << this << " tracks=" << newTracks.size(); | 620 SBLOG << __FUNCTION__ << " this=" << this << " tracks=" << newTracks.size(); |
583 DCHECK(m_source); | 621 DCHECK(m_source); |
584 DCHECK(m_source->mediaElement()); | 622 DCHECK(m_source->mediaElement()); |
585 DCHECK(m_updating); | 623 DCHECK(m_updating); |
586 | 624 |
587 // TODO(servolk): Implement proper 'initialization segment received' algorithm according to MSE spec: | 625 if (!RuntimeEnabledFeatures::audioVideoTracksEnabled()) { |
| 626 if (!m_firstInitializationSegmentReceived) { |
| 627 m_source->setSourceBufferActive(this); |
| 628 m_firstInitializationSegmentReceived = true; |
| 629 } |
| 630 return true; |
| 631 } |
| 632 |
| 633 // Implementation of Initialization Segment Received, see |
588 // https://w3c.github.io/media-source/#sourcebuffer-init-segment-received | 634 // https://w3c.github.io/media-source/#sourcebuffer-init-segment-received |
589 WebVector<WebMediaPlayer::TrackId> result(newTracks.size()); | 635 |
590 unsigned resultIdx = 0; | 636 // Sort newTracks into audio and video tracks to facilitate implementation |
591 for (const auto& trackInfo : newTracks) { | 637 // of subsequent steps of this algorithm. |
592 if (!RuntimeEnabledFeatures::audioVideoTracksEnabled()) { | 638 Vector<MediaTrackInfo> newAudioTracks; |
593 static unsigned nextTrackId = 0; | 639 Vector<MediaTrackInfo> newVideoTracks; |
594 StringBuilder stringBuilder; | 640 for (const MediaTrackInfo& trackInfo : newTracks) { |
595 stringBuilder.appendNumber(++nextTrackId); | 641 const TrackBase* track = nullptr; |
596 result[resultIdx++] = stringBuilder.toString(); | |
597 continue; | |
598 } | |
599 | |
600 const TrackBase* trackBase = nullptr; | |
601 if (trackInfo.trackType == WebMediaPlayer::AudioTrack) { | 642 if (trackInfo.trackType == WebMediaPlayer::AudioTrack) { |
602 AudioTrack* audioTrack = nullptr; | 643 newAudioTracks.append(trackInfo); |
603 if (!m_firstInitializationSegmentReceived) { | 644 if (m_firstInitializationSegmentReceived) |
604 audioTrack = AudioTrack::create(trackInfo.id, trackInfo.kind, trackInfo.label, trackInfo.language, false); | 645 track = findExistingTrackById(audioTracks(), trackInfo.id); |
605 SourceBufferTrackBaseSupplement::setSourceBuffer(*audioTrack, this); | |
606 audioTracks().add(audioTrack); | |
607 m_source->mediaElement()->audioTracks().add(audioTrack); | |
608 } else { | |
609 audioTrack = findExistingTrackById(audioTracks(), trackInfo.id); | |
610 DCHECK(audioTrack); | |
611 } | |
612 trackBase = audioTrack; | |
613 result[resultIdx++] = audioTrack->id(); | |
614 } else if (trackInfo.trackType == WebMediaPlayer::VideoTrack) { | 646 } else if (trackInfo.trackType == WebMediaPlayer::VideoTrack) { |
615 VideoTrack* videoTrack = nullptr; | 647 newVideoTracks.append(trackInfo); |
616 if (!m_firstInitializationSegmentReceived) { | 648 if (m_firstInitializationSegmentReceived) |
617 videoTrack = VideoTrack::create(trackInfo.id, trackInfo.kind, trackInfo.label, trackInfo.language, false); | 649 track = findExistingTrackById(videoTracks(), trackInfo.id); |
618 SourceBufferTrackBaseSupplement::setSourceBuffer(*videoTrack, this); | |
619 videoTracks().add(videoTrack); | |
620 m_source->mediaElement()->videoTracks().add(videoTrack); | |
621 } else { | |
622 videoTrack = findExistingTrackById(videoTracks(), trackInfo.id); | |
623 DCHECK(videoTrack); | |
624 } | |
625 trackBase = videoTrack; | |
626 result[resultIdx++] = videoTrack->id(); | |
627 } else { | 650 } else { |
| 651 SBLOG << __FUNCTION__ << " this=" << this << " failed: unsupported track type " << trackInfo.trackType; |
| 652 // TODO(servolk): Add handling of text tracks. |
628 NOTREACHED(); | 653 NOTREACHED(); |
629 } | 654 } |
630 (void)trackBase; | 655 if (m_firstInitializationSegmentReceived && !track) { |
| 656 SBLOG << __FUNCTION__ << " this=" << this << " failed: tracks mismatch the first init segment."; |
| 657 return false; |
| 658 } |
631 #if !LOG_DISABLED | 659 #if !LOG_DISABLED |
632 const char* logActionStr = m_firstInitializationSegmentReceived ? "using existing" : "added"; | |
633 const char* logTrackTypeStr = (trackInfo.trackType == WebMediaPlayer::AudioTrack) ? "audio" : "video"; | 660 const char* logTrackTypeStr = (trackInfo.trackType == WebMediaPlayer::AudioTrack) ? "audio" : "video"; |
634 SBLOG << __FUNCTION__ << "(" << this << ") " << logActionStr << " " | 661 SBLOG << __FUNCTION__ << " this=" << this << " : " << logTrackTypeStr << " track " |
635 << logTrackTypeStr << " Track " << trackBase << " id=" << String(trackBase->id()) | 662 << " id=" << String(trackInfo.id) << " byteStreamTrackID=" << String(trackInfo.byteStreamTrackID) |
636 << " label=" << trackBase->label() << " lang=" << trackBase->language(); | 663 << " kind=" << String(trackInfo.kind) << " label=" << String(trackInfo.label) << " language=" << String(trackInfo.language); |
637 #endif | 664 #endif |
638 } | 665 } |
639 | 666 |
| 667 // 1. Update the duration attribute if it currently equals NaN: |
| 668 // TODO(servolk): Pass also stream duration into initSegmentReceived. |
| 669 |
| 670 // 2. If the initialization segment has no audio, video, or text tracks, then run the append error algorithm with the decode error parameter set to true and abort these steps. |
| 671 if (newTracks.size() == 0) { |
| 672 SBLOG << __FUNCTION__ << " this=" << this << " failed: no tracks found in the init segment."; |
| 673 // The append error algorithm will be called at the top level after we return false here to indicate failure. |
| 674 return false; |
| 675 } |
| 676 |
| 677 // 3. If the first initialization segment received flag is true, then run the following steps: |
| 678 if (m_firstInitializationSegmentReceived) { |
| 679 // 3.1 Verify the following properties. If any of the checks fail then run the append error algorithm with the decode error parameter set to true and abort these steps. |
| 680 bool tracksMatchFirstInitSegment = true; |
| 681 // - The number of audio, video, and text tracks match what was in the first initialization segment. |
| 682 if (newAudioTracks.size() != audioTracks().length() || newVideoTracks.size() != videoTracks().length()) { |
| 683 tracksMatchFirstInitSegment = false; |
| 684 } |
| 685 // - The codecs for each track, match what was specified in the first initialization segment. |
| 686 // This is currently done in MediaSourceState::OnNewConfigs. |
| 687 // - If more than one track for a single type are present (ie 2 audio tracks), then the Track IDs match the ones in the first initialization segment. |
| 688 if (tracksMatchFirstInitSegment && newAudioTracks.size() > 1) { |
| 689 for (size_t i = 0; i < newAudioTracks.size(); ++i) { |
| 690 const String& newTrackId = newAudioTracks[i].id; |
| 691 if (newTrackId != String(audioTracks().anonymousIndexedGetter(i)->id())) { |
| 692 tracksMatchFirstInitSegment = false; |
| 693 break; |
| 694 } |
| 695 } |
| 696 } |
| 697 |
| 698 if (tracksMatchFirstInitSegment && newVideoTracks.size() > 1) { |
| 699 for (size_t i = 0; i < newVideoTracks.size(); ++i) { |
| 700 const String& newTrackId = newVideoTracks[i].id; |
| 701 if (newTrackId != String(videoTracks().anonymousIndexedGetter(i)->id())) { |
| 702 tracksMatchFirstInitSegment = false; |
| 703 break; |
| 704 } |
| 705 } |
| 706 } |
| 707 |
| 708 if (!tracksMatchFirstInitSegment) { |
| 709 SBLOG << __FUNCTION__ << " this=" << this << " failed: tracks mismatch the first init segment."; |
| 710 // The append error algorithm will be called at the top level after we return false here to indicate failure. |
| 711 return false; |
| 712 } |
| 713 |
| 714 // 3.2 Add the appropriate track descriptions from this initialization segment to each of the track buffers. |
| 715 // This is done in Chromium code in stream parsers and demuxer implementations. |
| 716 |
| 717 // 3.3 Set the need random access point flag on all track buffers to true. |
| 718 // This is done in Chromium code, see MediaSourceState::OnNewConfigs. |
| 719 } |
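
The step 3.1 ID check above walks the new per-type track list and the existing track list in parallel, so the comparison is positional: a later init segment has to present the same Track IDs in the same order as the first one, not merely the same set of IDs. A self-contained sketch of that comparison for a single track type (trackIdsMatchFirstInitSegment is a hypothetical helper that folds the count check and the per-index ID check into one function, whereas the code above performs them as separate steps):

    #include <cassert>
    #include <string>
    #include <vector>

    // Returns true if the new init segment's track IDs for one type match the
    // IDs recorded from the first init segment, compared position by position.
    bool trackIdsMatchFirstInitSegment(const std::vector<std::string>& existingIds,
                                       const std::vector<std::string>& newIds)
    {
        if (newIds.size() != existingIds.size())
            return false;
        for (size_t i = 0; i < newIds.size(); ++i) {
            if (newIds[i] != existingIds[i])
                return false;
        }
        return true;
    }

    int main()
    {
        assert(trackIdsMatchFirstInitSegment({ "1", "2" }, { "1", "2" }));   // same IDs, same order
        assert(!trackIdsMatchFirstInitSegment({ "1", "2" }, { "2", "1" }));  // same IDs, reordered -> mismatch
        assert(!trackIdsMatchFirstInitSegment({ "1", "2" }, { "1" }));       // count changed -> mismatch
        return 0;
    }
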
| 720 |
| 721 // 4. Let active track flag equal false. |
| 722 m_activeTrack = false; |
| 723 |
| 724 // 5. If the first initialization segment received flag is false, then run the following steps: |
640 if (!m_firstInitializationSegmentReceived) { | 725 if (!m_firstInitializationSegmentReceived) { |
641 // 5. If active track flag equals true, then run the following steps: | 726 // 5.1 If the initialization segment contains tracks with codecs the user agent does not support, then run the append error algorithm with the decode error parameter set to true and abort these steps. |
642 // 5.1. Add this SourceBuffer to activeSourceBuffers. | 727 // This is done in Chromium code, see MediaSourceState::OnNewConfigs. |
643 // 5.2. Queue a task to fire a simple event named addsourcebuffer at | 728 |
| 729 // 5.2 For each audio track in the initialization segment, run following steps: |
| 730 for (const MediaTrackInfo& trackInfo : newAudioTracks) { |
| 731 // 5.2.1 Let audio byte stream track ID be the Track ID for the current track being processed. |
| 732 const auto& byteStreamTrackID = trackInfo.byteStreamTrackID; |
| 733 // 5.2.2 Let audio language be a BCP 47 language tag for the language specified in the initialization segment for this track or an empty string if no language info is present. |
| 734 WebString language = trackInfo.language; |
| 735 // 5.2.3 If audio language equals an empty string or the 'und' BCP 47 value, then run the default track language algorithm with byteStreamTrackID set to |
| 736 // audio byte stream track ID and type set to "audio" and assign the value returned by the algorithm to audio language. |
| 737 if (language.isEmpty() || language == "und") |
| 738 language = defaultTrackLanguage(TrackDefault::audioKeyword(), byteStreamTrackID); |
| 739 // 5.2.4 Let audio label be a label specified in the initialization segment for this track or an empty string if no label info is present. |
| 740 WebString label = trackInfo.label; |
| 741 // 5.2.5 If audio label equals an empty string, then run the default track label algorithm with byteStreamTrackID set to audio byte stream track ID and |
| 742 // type set to "audio" and assign the value returned by the algorithm to audio label. |
| 743 if (label.isEmpty()) |
| 744 label = defaultTrackLabel(TrackDefault::audioKeyword(), byteStreamTrackID); |
| 745 // 5.2.6 Let audio kinds be an array of kind strings specified in the initialization segment for this track or an empty array if no kind information is provided. |
| 746 const auto& kind = trackInfo.kind; |
| 747 // 5.2.7 TODO(servolk): Implement track kind processing. |
| 748 // 5.2.8.2 Let new audio track be a new AudioTrack object. |
| 749 AudioTrack* audioTrack = AudioTrack::create(byteStreamTrackID, kind, label, language, false); |
| 750 SourceBufferTrackBaseSupplement::setSourceBuffer(*audioTrack, this); |
| 751 // 5.2.8.7 If audioTracks.length equals 0, then run the following steps: |
| 752 if (audioTracks().length() == 0) { |
| 753 // 5.2.8.7.1 Set the enabled property on new audio track to true. |
| 754 audioTrack->setEnabled(true); |
| 755 // 5.2.8.7.2 Set active track flag to true. |
| 756 m_activeTrack = true; |
| 757 } |
| 758 // 5.2.8.8 Add new audio track to the audioTracks attribute on this SourceBuffer object. |
| 759 // 5.2.8.9 Queue a task to fire a trusted event named addtrack, that does not bubble and is not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object referenced by the audioTracks attribute on this SourceBuffer object. |
| 760 audioTracks().add(audioTrack); |
| 761 // 5.2.8.10 Add new audio track to the audioTracks attribute on the HTMLMediaElement. |
| 762 // 5.2.8.11 Queue a task to fire a trusted event named addtrack, that does not bubble and is not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object referenced by the audioTracks attribute on the HTMLMediaElement. |
| 763 m_source->mediaElement()->audioTracks().add(audioTrack); |
| 764 } |
| 765 |
| 766 // 5.3. For each video track in the initialization segment, run following steps: |
| 767 for (const MediaTrackInfo& trackInfo : newVideoTracks) { |
| 768 // 5.3.1 Let video byte stream track ID be the Track ID for the current track being processed. |
| 769 const auto& byteStreamTrackID = trackInfo.byteStreamTrackID; |
| 770 // 5.3.2 Let video language be a BCP 47 language tag for the language specified in the initialization segment for this track or an empty string if no language info is present. |
| 771 WebString language = trackInfo.language; |
| 772 // 5.3.3 If video language equals an empty string or the 'und' BCP 47 value, then run the default track language algorithm with byteStreamTrackID set to |
| 773 // video byte stream track ID and type set to "video" and assign the value returned by the algorithm to video language. |
| 774 if (language.isEmpty() || language == "und") |
| 775 language = defaultTrackLanguage(TrackDefault::videoKeyword(), byteStreamTrackID); |
| 776 // 5.3.4 Let video label be a label specified in the initialization segment for this track or an empty string if no label info is present. |
| 777 WebString label = trackInfo.label; |
| 778 // 5.3.5 If video label equals an empty string, then run the default track label algorithm with byteStreamTrackID set to video byte stream track ID and |
| 779 // type set to "video" and assign the value returned by the algorithm to video label. |
| 780 if (label.isEmpty()) |
| 781 label = defaultTrackLabel(TrackDefault::videoKeyword(), byteStreamTrackID); |
| 782 // 5.3.6 Let video kinds be an array of kind strings specified in the initialization segment for this track or an empty array if no kind information is provided. |
| 783 const auto& kind = trackInfo.kind; |
| 784 // 5.3.7 TODO(servolk): Implement track kind processing. |
| 785 // 5.3.8.2 Let new video track be a new VideoTrack object. |
| 786 VideoTrack* videoTrack = VideoTrack::create(byteStreamTrackID, kind, label, language, false); |
| 787 SourceBufferTrackBaseSupplement::setSourceBuffer(*videoTrack, this); |
| 788 // 5.3.8.7 If videoTracks.length equals 0, then run the following steps: |
| 789 if (videoTracks().length() == 0) { |
| 790 // 5.3.8.7.1 Set the selected property on new video track to true. |
| 791 videoTrack->setSelected(true); |
| 792 // 5.3.8.7.2 Set active track flag to true. |
| 793 m_activeTrack = true; |
| 794 } |
| 795 // 5.3.8.8 Add new video track to the videoTracks attribute on this SourceBuffer object. |
| 796 // 5.3.8.9 Queue a task to fire a trusted event named addtrack, that does not bubble and is not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object referenced by the videoTracks attribute on this SourceBuffer object. |
| 797 videoTracks().add(videoTrack); |
| 798 // 5.3.8.10 Add new video track to the videoTracks attribute on the HTMLMediaElement. |
| 799 // 5.3.8.11 Queue a task to fire a trusted event named addtrack, that does not bubble and is not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object referenced by the videoTracks attribute on the HTMLMediaElement. |
| 800 m_source->mediaElement()->videoTracks().add(videoTrack); |
| 801 } |
| 802 |
| 803 // 5.4 TODO(servolk): Add text track processing here. |
| 804 |
| 805 // 5.5 If active track flag equals true, then run the following steps: |
644 // activesourcebuffers. | 806 // activesourcebuffers. |
645 m_source->setSourceBufferActive(this); | 807 if (m_activeTrack) { |
646 | 808 // 5.5.1 Add this SourceBuffer to activeSourceBuffers. |
647 // 6. Set first initialization segment received flag to true. | 809 // 5.5.2 Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers |
| 810 m_source->setSourceBufferActive(this); |
| 811 } |
| 812 |
| 813 // 5.6. Set first initialization segment received flag to true. |
648 m_firstInitializationSegmentReceived = true; | 814 m_firstInitializationSegmentReceived = true; |
649 } | 815 } |
650 | 816 |
651 return result; | 817 return true; |
652 } | 818 } |
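
Steps 5.2.2-5.2.5 and their 5.3.x counterparts in the function above reduce to one fallback rule per attribute: keep the in-band value from the init segment unless it is empty (or "und" in the language case), otherwise substitute the value produced by the default track label/language algorithms. A standalone sketch of just that rule, with std::string standing in for WebString/AtomicString and the TrackDefault-derived value supplied by the caller (resolveTrackLanguage and resolveTrackLabel are illustrative names only):

    #include <cassert>
    #include <string>

    // Language fallback: an empty or "und" in-band language is replaced by the
    // default-track-language value, which may itself be an empty string.
    std::string resolveTrackLanguage(const std::string& inBandLanguage,
                                     const std::string& defaultLanguage)
    {
        if (inBandLanguage.empty() || inBandLanguage == "und")
            return defaultLanguage;
        return inBandLanguage;
    }

    // Label fallback: only an empty in-band label falls back to the default.
    std::string resolveTrackLabel(const std::string& inBandLabel,
                                  const std::string& defaultLabel)
    {
        return inBandLabel.empty() ? defaultLabel : inBandLabel;
    }

    int main()
    {
        assert(resolveTrackLanguage("und", "en") == "en");
        assert(resolveTrackLanguage("", "en") == "en");
        assert(resolveTrackLanguage("fr", "en") == "fr");
        assert(resolveTrackLabel("", "Main") == "Main");
        assert(resolveTrackLabel("Commentary", "Main") == "Commentary");
        return 0;
    }
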
653 | 819 |
654 bool SourceBuffer::hasPendingActivity() const | 820 bool SourceBuffer::hasPendingActivity() const |
655 { | 821 { |
656 return m_source; | 822 return m_source; |
657 } | 823 } |
658 | 824 |
659 void SourceBuffer::suspend() | 825 void SourceBuffer::suspend() |
660 { | 826 { |
661 m_appendBufferAsyncPartRunner->suspend(); | 827 m_appendBufferAsyncPartRunner->suspend(); |
(...skipping 387 matching lines...)
1049 visitor->trace(m_removeAsyncPartRunner); | 1215 visitor->trace(m_removeAsyncPartRunner); |
1050 visitor->trace(m_appendStreamAsyncPartRunner); | 1216 visitor->trace(m_appendStreamAsyncPartRunner); |
1051 visitor->trace(m_stream); | 1217 visitor->trace(m_stream); |
1052 visitor->trace(m_audioTracks); | 1218 visitor->trace(m_audioTracks); |
1053 visitor->trace(m_videoTracks); | 1219 visitor->trace(m_videoTracks); |
1054 EventTargetWithInlineData::trace(visitor); | 1220 EventTargetWithInlineData::trace(visitor); |
1055 ActiveDOMObject::trace(visitor); | 1221 ActiveDOMObject::trace(visitor); |
1056 } | 1222 } |
1057 | 1223 |
1058 } // namespace blink | 1224 } // namespace blink |