Java Code Examples for com.google.android.exoplayer2.util.MimeTypes#isAudio()
The following examples show how to use
com.google.android.exoplayer2.util.MimeTypes#isAudio() .
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: ClippingMediaPeriod.java From Telegram with GNU General Public License v2.0 | 6 votes |
private static boolean shouldKeepInitialDiscontinuity(long startUs, TrackSelection[] selections) { // If the clipping start position is non-zero, the clipping sample streams will adjust // timestamps on buffers they read from the unclipped sample streams. These adjusted buffer // timestamps can be negative, because sample streams provide buffers starting at a key-frame, // which may be before the clipping start point. When the renderer reads a buffer with a // negative timestamp, its offset timestamp can jump backwards compared to the last timestamp // read in the previous period. Renderer implementations may not allow this, so we signal a // discontinuity which resets the renderers before they read the clipping sample stream. // However, for audio-only track selections we assume to have random access seek behaviour and // do not need an initial discontinuity to reset the renderer. if (startUs != 0) { for (TrackSelection trackSelection : selections) { if (trackSelection != null) { Format selectedFormat = trackSelection.getSelectedFormat(); if (!MimeTypes.isAudio(selectedFormat.sampleMimeType)) { return true; } } } } return false; }
Example 2
Source File: DashManifestParser.java From Telegram-FOSS with GNU General Public License v2.0 | 5 votes |
/**
 * Maps the format's sample MIME type to one of the {@code C.TRACK_TYPE_*} constants.
 * Returns {@code C.TRACK_TYPE_UNKNOWN} when the type is absent or unrecognized.
 */
protected int getContentType(Format format) {
  String mimeType = format.sampleMimeType;
  if (TextUtils.isEmpty(mimeType)) {
    return C.TRACK_TYPE_UNKNOWN;
  }
  if (MimeTypes.isVideo(mimeType)) {
    return C.TRACK_TYPE_VIDEO;
  }
  if (MimeTypes.isAudio(mimeType)) {
    return C.TRACK_TYPE_AUDIO;
  }
  if (mimeTypeIsRawText(mimeType)) {
    return C.TRACK_TYPE_TEXT;
  }
  return C.TRACK_TYPE_UNKNOWN;
}
Example 3
Source File: SimpleDecoderAudioRenderer.java From Telegram with GNU General Public License v2.0 | 5 votes |
/**
 * Reports the renderer's level of support for the given format: rejects non-audio types,
 * delegates subtype/DRM checks to {@code supportsFormatInternal}, and adds adaptiveness and
 * tunneling capability flags on success.
 */
@Override
public final int supportsFormat(Format format) {
  // This renderer only handles audio sample types.
  if (!MimeTypes.isAudio(format.sampleMimeType)) {
    return FORMAT_UNSUPPORTED_TYPE;
  }
  int support = supportsFormatInternal(drmSessionManager, format);
  if (support <= FORMAT_UNSUPPORTED_DRM) {
    // Propagate the failure reason without adding capability flags.
    return support;
  }
  // Tunneled playback requires API level 21.
  int tunneling;
  if (Util.SDK_INT >= 21) {
    tunneling = TUNNELING_SUPPORTED;
  } else {
    tunneling = TUNNELING_NOT_SUPPORTED;
  }
  return ADAPTIVE_NOT_SEAMLESS | tunneling | support;
}
Example 4
Source File: ExtractorMediaPeriod.java From TelePlus-Android with GNU General Public License v2.0 | 5 votes |
// Completes preparation once the seek map is available and every sample queue has received a
// format. Builds the exposed track groups, records which tracks are audio/video, picks the
// retry count, and notifies the listener and callback. No-ops until all preconditions hold.
private void maybeFinishPrepare() {
  if (released || prepared || seekMap == null || !sampleQueuesBuilt) {
    return;
  }
  for (SampleQueue sampleQueue : sampleQueues) {
    if (sampleQueue.getUpstreamFormat() == null) {
      // A queue has not yet seen its format; preparation is retried later.
      return;
    }
  }
  loadCondition.close();
  int trackCount = sampleQueues.length;
  TrackGroup[] trackArray = new TrackGroup[trackCount];
  trackIsAudioVideoFlags = new boolean[trackCount];
  trackEnabledStates = new boolean[trackCount];
  trackFormatNotificationSent = new boolean[trackCount];
  durationUs = seekMap.getDurationUs();
  for (int i = 0; i < trackCount; i++) {
    Format trackFormat = sampleQueues[i].getUpstreamFormat();
    trackArray[i] = new TrackGroup(trackFormat);
    String mimeType = trackFormat.sampleMimeType;
    // A track counts as audio/video if its sample MIME type is either.
    boolean isAudioVideo = MimeTypes.isVideo(mimeType) || MimeTypes.isAudio(mimeType);
    trackIsAudioVideoFlags[i] = isAudioVideo;
    haveAudioVideoTracks |= isAudioVideo;
  }
  tracks = new TrackGroupArray(trackArray);
  if (minLoadableRetryCount == ExtractorMediaSource.MIN_RETRY_COUNT_DEFAULT_FOR_MEDIA
      && length == C.LENGTH_UNSET
      && seekMap.getDurationUs() == C.TIME_UNSET) {
    // Unknown length and duration suggest a live stream; use the live retry count.
    actualMinLoadableRetryCount = ExtractorMediaSource.DEFAULT_MIN_LOADABLE_RETRY_COUNT_LIVE;
  }
  prepared = true;
  listener.onSourceInfoRefreshed(durationUs, seekMap.isSeekable());
  callback.onPrepared(this);
}
Example 5
Source File: FfmpegAudioRenderer.java From TelePlus-Android with GNU General Public License v2.0 | 5 votes |
/**
 * Determines FFmpeg decoder support for the format: the type must be audio, the codec and
 * output configuration must be supported by the native library, and any DRM scheme must be
 * supported by the session manager.
 */
@Override
protected int supportsFormatInternal(DrmSessionManager<ExoMediaCrypto> drmSessionManager,
    Format format) {
  // The sample MIME type must be present before any classification is possible.
  Assertions.checkNotNull(format.sampleMimeType);
  if (!MimeTypes.isAudio(format.sampleMimeType)) {
    return FORMAT_UNSUPPORTED_TYPE;
  }
  boolean decodable =
      FfmpegLibrary.supportsFormat(format.sampleMimeType, format.pcmEncoding)
          && isOutputSupported(format);
  if (!decodable) {
    return FORMAT_UNSUPPORTED_SUBTYPE;
  }
  if (!supportsFormatDrm(drmSessionManager, format.drmInitData)) {
    return FORMAT_UNSUPPORTED_DRM;
  }
  return FORMAT_HANDLED;
}
Example 6
Source File: TrackSelectionHelper.java From ExoPlayer-Offline with Apache License 2.0 | 5 votes |
/**
 * Assembles a human-readable track label from the properties relevant to the track's type,
 * falling back to {@code "unknown"} when no property yields any text.
 */
private static String buildTrackName(Format format) {
  String name;
  if (MimeTypes.isVideo(format.sampleMimeType)) {
    // Video tracks: resolution, bitrate, id.
    name = joinWithSeparator(
        joinWithSeparator(buildResolutionString(format), buildBitrateString(format)),
        buildTrackIdString(format));
  } else if (MimeTypes.isAudio(format.sampleMimeType)) {
    // Audio tracks: language, audio properties, bitrate, id.
    name = joinWithSeparator(
        joinWithSeparator(
            joinWithSeparator(buildLanguageString(format), buildAudioPropertyString(format)),
            buildBitrateString(format)),
        buildTrackIdString(format));
  } else {
    // Other tracks: language, bitrate, id.
    name = joinWithSeparator(
        joinWithSeparator(buildLanguageString(format), buildBitrateString(format)),
        buildTrackIdString(format));
  }
  if (name.length() == 0) {
    return "unknown";
  }
  return name;
}
Example 7
Source File: ExtractorMediaPeriod.java From TelePlus-Android with GNU General Public License v2.0 | 5 votes |
// Completes preparation once the seek map is available and every sample queue has received a
// format. Builds the exposed track groups, records which tracks are audio/video, picks the
// retry count, and notifies the listener and callback. No-ops until all preconditions hold.
private void maybeFinishPrepare() {
  if (released || prepared || seekMap == null || !sampleQueuesBuilt) {
    return;
  }
  for (SampleQueue sampleQueue : sampleQueues) {
    if (sampleQueue.getUpstreamFormat() == null) {
      // A queue has not yet seen its format; preparation is retried later.
      return;
    }
  }
  loadCondition.close();
  int trackCount = sampleQueues.length;
  TrackGroup[] trackArray = new TrackGroup[trackCount];
  trackIsAudioVideoFlags = new boolean[trackCount];
  trackEnabledStates = new boolean[trackCount];
  trackFormatNotificationSent = new boolean[trackCount];
  durationUs = seekMap.getDurationUs();
  for (int i = 0; i < trackCount; i++) {
    Format trackFormat = sampleQueues[i].getUpstreamFormat();
    trackArray[i] = new TrackGroup(trackFormat);
    String mimeType = trackFormat.sampleMimeType;
    // A track counts as audio/video if its sample MIME type is either.
    boolean isAudioVideo = MimeTypes.isVideo(mimeType) || MimeTypes.isAudio(mimeType);
    trackIsAudioVideoFlags[i] = isAudioVideo;
    haveAudioVideoTracks |= isAudioVideo;
  }
  tracks = new TrackGroupArray(trackArray);
  if (minLoadableRetryCount == ExtractorMediaSource.MIN_RETRY_COUNT_DEFAULT_FOR_MEDIA
      && length == C.LENGTH_UNSET
      && seekMap.getDurationUs() == C.TIME_UNSET) {
    // Unknown length and duration suggest a live stream; use the live retry count.
    actualMinLoadableRetryCount = ExtractorMediaSource.DEFAULT_MIN_LOADABLE_RETRY_COUNT_LIVE;
  }
  prepared = true;
  listener.onSourceInfoRefreshed(durationUs, seekMap.isSeekable());
  callback.onPrepared(this);
}
Example 8
Source File: DashManifestParser.java From Telegram with GNU General Public License v2.0 | 5 votes |
/**
 * Classifies the format as one of the {@code C.TRACK_TYPE_*} constants based on its sample
 * MIME type; unknown and empty types map to {@code C.TRACK_TYPE_UNKNOWN}.
 */
protected int getContentType(Format format) {
  String type = format.sampleMimeType;
  if (TextUtils.isEmpty(type)) {
    return C.TRACK_TYPE_UNKNOWN;
  }
  if (MimeTypes.isVideo(type)) {
    return C.TRACK_TYPE_VIDEO;
  }
  if (MimeTypes.isAudio(type)) {
    return C.TRACK_TYPE_AUDIO;
  }
  if (mimeTypeIsRawText(type)) {
    return C.TRACK_TYPE_TEXT;
  }
  return C.TRACK_TYPE_UNKNOWN;
}
Example 9
Source File: DashManifestParser.java From MediaSDK with Apache License 2.0 | 5 votes |
/**
 * Derives a sample mimeType from a container mimeType and codecs attribute.
 *
 * @param containerMimeType The mimeType of the container.
 * @param codecs The codecs attribute.
 * @return The derived sample mimeType, or null if it could not be derived.
 */
@Nullable
private static String getSampleMimeType(
    @Nullable String containerMimeType, @Nullable String codecs) {
  if (MimeTypes.isAudio(containerMimeType)) {
    return MimeTypes.getAudioMediaMimeType(codecs);
  } else if (MimeTypes.isVideo(containerMimeType)) {
    return MimeTypes.getVideoMediaMimeType(codecs);
  } else if (mimeTypeIsRawText(containerMimeType)) {
    // Raw text containers carry the samples directly; the container type is the sample type.
    return containerMimeType;
  } else if (MimeTypes.APPLICATION_MP4.equals(containerMimeType)) {
    if (codecs != null) {
      if (codecs.startsWith("stpp")) {
        // TTML subtitles carried in MP4.
        return MimeTypes.APPLICATION_TTML;
      } else if (codecs.startsWith("wvtt")) {
        // WebVTT subtitles carried in MP4.
        return MimeTypes.APPLICATION_MP4VTT;
      }
    }
  } else if (MimeTypes.APPLICATION_RAWCC.equals(containerMimeType)) {
    if (codecs != null) {
      if (codecs.contains("cea708")) {
        return MimeTypes.APPLICATION_CEA708;
      } else if (codecs.contains("eia608") || codecs.contains("cea608")) {
        return MimeTypes.APPLICATION_CEA608;
      }
    }
    return null;
  }
  return null;
}
Example 10
Source File: ExtractorMediaPeriod.java From K-Sonic with MIT License | 5 votes |
// Completes preparation once the seek map exists and every sample queue has received a format.
// Builds the exposed track groups, flags audio/video tracks, and notifies the source listener
// (with a single-period timeline) and the callback. No-ops until all preconditions hold.
private void maybeFinishPrepare() {
  if (released || prepared || seekMap == null || !tracksBuilt) {
    return;
  }
  int trackCount = sampleQueues.size();
  for (int i = 0; i < trackCount; i++) {
    if (sampleQueues.valueAt(i).getUpstreamFormat() == null) {
      // A queue has not yet seen its format; preparation is retried later.
      return;
    }
  }
  loadCondition.close();
  TrackGroup[] trackArray = new TrackGroup[trackCount];
  trackIsAudioVideoFlags = new boolean[trackCount];
  trackEnabledStates = new boolean[trackCount];
  durationUs = seekMap.getDurationUs();
  for (int i = 0; i < trackCount; i++) {
    Format trackFormat = sampleQueues.valueAt(i).getUpstreamFormat();
    trackArray[i] = new TrackGroup(trackFormat);
    String mimeType = trackFormat.sampleMimeType;
    // A track counts as audio/video if its sample MIME type is either.
    boolean isAudioVideo = MimeTypes.isVideo(mimeType) || MimeTypes.isAudio(mimeType);
    trackIsAudioVideoFlags[i] = isAudioVideo;
    haveAudioVideoTracks |= isAudioVideo;
  }
  tracks = new TrackGroupArray(trackArray);
  prepared = true;
  sourceListener.onSourceInfoRefreshed(
      new SinglePeriodTimeline(durationUs, seekMap.isSeekable()), null);
  callback.onPrepared(this);
}
Example 11
Source File: DashManifestParser.java From Telegram-FOSS with GNU General Public License v2.0 | 5 votes |
/**
 * Derives a sample mimeType from a container mimeType and codecs attribute.
 *
 * @param containerMimeType The mimeType of the container.
 * @param codecs The codecs attribute.
 * @return The derived sample mimeType, or null if it could not be derived.
 */
private static String getSampleMimeType(String containerMimeType, String codecs) {
  if (MimeTypes.isAudio(containerMimeType)) {
    return MimeTypes.getAudioMediaMimeType(codecs);
  } else if (MimeTypes.isVideo(containerMimeType)) {
    return MimeTypes.getVideoMediaMimeType(codecs);
  } else if (mimeTypeIsRawText(containerMimeType)) {
    // Raw text containers carry the samples directly; the container type is the sample type.
    return containerMimeType;
  } else if (MimeTypes.APPLICATION_MP4.equals(containerMimeType)) {
    if (codecs != null) {
      if (codecs.startsWith("stpp")) {
        // TTML subtitles carried in MP4.
        return MimeTypes.APPLICATION_TTML;
      } else if (codecs.startsWith("wvtt")) {
        // WebVTT subtitles carried in MP4.
        return MimeTypes.APPLICATION_MP4VTT;
      }
    }
  } else if (MimeTypes.APPLICATION_RAWCC.equals(containerMimeType)) {
    if (codecs != null) {
      if (codecs.contains("cea708")) {
        return MimeTypes.APPLICATION_CEA708;
      } else if (codecs.contains("eia608") || codecs.contains("cea608")) {
        return MimeTypes.APPLICATION_CEA608;
      }
    }
    return null;
  }
  return null;
}
Example 12
Source File: DashManifestParser.java From TelePlus-Android with GNU General Public License v2.0 | 4 votes |
// Builds a Format for a DASH representation: derives the sample MIME type from the container
// type and codecs, then dispatches to the matching video/audio/text container-format factory,
// falling back to a generic container format when the sample type is unknown.
protected Format buildFormat(
    String id,
    String label,
    String containerMimeType,
    int width,
    int height,
    float frameRate,
    int audioChannels,
    int audioSamplingRate,
    int bitrate,
    String language,
    @C.SelectionFlags int selectionFlags,
    List<Descriptor> accessibilityDescriptors,
    String codecs,
    List<Descriptor> supplementalProperties) {
  String sampleMimeType = getSampleMimeType(containerMimeType, codecs);
  if (sampleMimeType != null) {
    if (MimeTypes.AUDIO_E_AC3.equals(sampleMimeType)) {
      // E-AC3 may need refinement (e.g. to E-AC3-JOC) based on supplemental properties.
      sampleMimeType = parseEac3SupplementalProperties(supplementalProperties);
    }
    if (MimeTypes.isVideo(sampleMimeType)) {
      return Format.createVideoContainerFormat(
          id, label, containerMimeType, sampleMimeType, codecs, bitrate, width, height,
          frameRate, /* initializationData= */ null, selectionFlags);
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      return Format.createAudioContainerFormat(
          id, label, containerMimeType, sampleMimeType, codecs, bitrate, audioChannels,
          audioSamplingRate, /* initializationData= */ null, selectionFlags, language);
    } else if (mimeTypeIsRawText(sampleMimeType)) {
      int accessibilityChannel;
      if (MimeTypes.APPLICATION_CEA608.equals(sampleMimeType)) {
        accessibilityChannel = parseCea608AccessibilityChannel(accessibilityDescriptors);
      } else if (MimeTypes.APPLICATION_CEA708.equals(sampleMimeType)) {
        accessibilityChannel = parseCea708AccessibilityChannel(accessibilityDescriptors);
      } else {
        // Other raw-text types have no accessibility channel.
        accessibilityChannel = Format.NO_VALUE;
      }
      return Format.createTextContainerFormat(
          id, label, containerMimeType, sampleMimeType, codecs, bitrate, selectionFlags,
          language, accessibilityChannel);
    }
  }
  // Unknown sample type: fall back to a generic container format.
  return Format.createContainerFormat(
      id, label, containerMimeType, sampleMimeType, codecs, bitrate, selectionFlags, language);
}
Example 13
Source File: DashManifestParser.java From Telegram-FOSS with GNU General Public License v2.0 | 4 votes |
// Builds a Format for a DASH representation: derives the sample MIME type from the container
// type and codecs, computes selection/role flags from the role and accessibility descriptors,
// then dispatches to the matching video/audio/text container-format factory, falling back to
// a generic container format when the sample type is unknown.
protected Format buildFormat(
    String id,
    String containerMimeType,
    int width,
    int height,
    float frameRate,
    int audioChannels,
    int audioSamplingRate,
    int bitrate,
    String language,
    List<Descriptor> roleDescriptors,
    List<Descriptor> accessibilityDescriptors,
    String codecs,
    List<Descriptor> supplementalProperties) {
  String sampleMimeType = getSampleMimeType(containerMimeType, codecs);
  @C.SelectionFlags int selectionFlags = parseSelectionFlagsFromRoleDescriptors(roleDescriptors);
  @C.RoleFlags int roleFlags = parseRoleFlagsFromRoleDescriptors(roleDescriptors);
  // Accessibility descriptors may contribute additional role flags.
  roleFlags |= parseRoleFlagsFromAccessibilityDescriptors(accessibilityDescriptors);
  if (sampleMimeType != null) {
    if (MimeTypes.AUDIO_E_AC3.equals(sampleMimeType)) {
      // E-AC3 may need refinement (e.g. to E-AC3-JOC) based on supplemental properties.
      sampleMimeType = parseEac3SupplementalProperties(supplementalProperties);
    }
    if (MimeTypes.isVideo(sampleMimeType)) {
      return Format.createVideoContainerFormat(
          id, /* label= */ null, containerMimeType, sampleMimeType, codecs,
          /* metadata= */ null, bitrate, width, height, frameRate,
          /* initializationData= */ null, selectionFlags, roleFlags);
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      return Format.createAudioContainerFormat(
          id, /* label= */ null, containerMimeType, sampleMimeType, codecs,
          /* metadata= */ null, bitrate, audioChannels, audioSamplingRate,
          /* initializationData= */ null, selectionFlags, roleFlags, language);
    } else if (mimeTypeIsRawText(sampleMimeType)) {
      int accessibilityChannel;
      if (MimeTypes.APPLICATION_CEA608.equals(sampleMimeType)) {
        accessibilityChannel = parseCea608AccessibilityChannel(accessibilityDescriptors);
      } else if (MimeTypes.APPLICATION_CEA708.equals(sampleMimeType)) {
        accessibilityChannel = parseCea708AccessibilityChannel(accessibilityDescriptors);
      } else {
        // Other raw-text types have no accessibility channel.
        accessibilityChannel = Format.NO_VALUE;
      }
      return Format.createTextContainerFormat(
          id, /* label= */ null, containerMimeType, sampleMimeType, codecs, bitrate,
          selectionFlags, roleFlags, language, accessibilityChannel);
    }
  }
  // Unknown sample type: fall back to a generic container format.
  return Format.createContainerFormat(
      id, /* label= */ null, containerMimeType, sampleMimeType, codecs, bitrate,
      selectionFlags, roleFlags, language);
}
Example 14
Source File: HlsSampleStreamWrapper.java From Telegram with GNU General Public License v2.0 | 4 votes |
/**
 * Builds tracks that are exposed by this {@link HlsSampleStreamWrapper} instance, as well as
 * internal data-structures required for operation.
 *
 * <p>Tracks in HLS are complicated. A HLS master playlist contains a number of "variants". Each
 * variant stream typically contains muxed video, audio and (possibly) additional audio, metadata
 * and caption tracks. We wish to allow the user to select between an adaptive track that spans
 * all variants, as well as each individual variant. If multiple audio tracks are present within
 * each variant then we wish to allow the user to select between those also.
 *
 * <p>To do this, tracks are constructed as follows. The {@link HlsChunkSource} exposes (N+1)
 * tracks, where N is the number of variants defined in the HLS master playlist. These consist of
 * one adaptive track defined to span all variants and a track for each individual variant. The
 * adaptive track is initially selected. The extractor is then prepared to discover the tracks
 * inside of each variant stream. The two sets of tracks are then combined by this method to
 * create a third set, which is the set exposed by this {@link HlsSampleStreamWrapper}:
 *
 * <ul>
 *   <li>The extractor tracks are inspected to infer a "primary" track type. If a video track is
 *       present then it is always the primary type. If not, audio is the primary type if present.
 *       Else text is the primary type if present. Else there is no primary type.
 *   <li>If there is exactly one extractor track of the primary type, it's expanded into (N+1)
 *       exposed tracks, all of which correspond to the primary extractor track and each of which
 *       corresponds to a different chunk source track. Selecting one of these tracks has the
 *       effect of switching the selected track on the chunk source.
 *   <li>All other extractor tracks are exposed directly. Selecting one of these tracks has the
 *       effect of selecting an extractor track, leaving the selected track on the chunk source
 *       unchanged.
 * </ul>
 */
private void buildTracksFromSampleStreams() {
  // Iterate through the extractor tracks to discover the "primary" track type, and the index
  // of the single track of this type.
  int primaryExtractorTrackType = C.TRACK_TYPE_NONE;
  int primaryExtractorTrackIndex = C.INDEX_UNSET;
  int extractorTrackCount = sampleQueues.length;
  for (int i = 0; i < extractorTrackCount; i++) {
    String sampleMimeType = sampleQueues[i].getUpstreamFormat().sampleMimeType;
    int trackType;
    if (MimeTypes.isVideo(sampleMimeType)) {
      trackType = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      trackType = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(sampleMimeType)) {
      trackType = C.TRACK_TYPE_TEXT;
    } else {
      trackType = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(trackType) > getTrackTypeScore(primaryExtractorTrackType)) {
      // Found a higher-priority track type; it becomes the primary candidate.
      primaryExtractorTrackType = trackType;
      primaryExtractorTrackIndex = i;
    } else if (trackType == primaryExtractorTrackType
        && primaryExtractorTrackIndex != C.INDEX_UNSET) {
      // We have multiple tracks of the primary type. We only want an index if there only exists a
      // single track of the primary type, so unset the index again.
      primaryExtractorTrackIndex = C.INDEX_UNSET;
    }
  }
  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;
  // Instantiate the necessary internal data-structures.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    trackGroupToSampleQueueIndex[i] = i;
  }
  // Construct the set of exposed track groups.
  TrackGroup[] trackGroups = new TrackGroup[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    Format sampleFormat = sampleQueues[i].getUpstreamFormat();
    if (i == primaryExtractorTrackIndex) {
      // Expand the single primary extractor track into one format per chunk source track.
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        formats[0] = sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      trackGroups[i] = new TrackGroup(formats);
      primaryTrackGroupIndex = i;
    } else {
      // Muxed audio alongside primary video may take its manifest format from muxedAudioFormat.
      Format trackFormat =
          primaryExtractorTrackType == C.TRACK_TYPE_VIDEO
                  && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      trackGroups[i] = new TrackGroup(deriveFormat(trackFormat, sampleFormat, false));
    }
  }
  this.trackGroups = new TrackGroupArray(trackGroups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = TrackGroupArray.EMPTY;
}
Example 15
Source File: ProgressiveMediaPeriod.java From MediaSDK with Apache License 2.0 | 4 votes |
// Completes preparation once the seek map exists and all sample queues have formats. Builds
// track groups, attaching ICY metadata and a fallback bitrate to audio tracks when ICY headers
// are present, determines live-ness from length/duration, then notifies listener and callback.
private void maybeFinishPrepare() {
  SeekMap seekMap = this.seekMap;
  if (released || prepared || !sampleQueuesBuilt || seekMap == null) {
    return;
  }
  for (SampleQueue sampleQueue : sampleQueues) {
    if (sampleQueue.getUpstreamFormat() == null) {
      // A queue has not yet seen its format; preparation is retried later.
      return;
    }
  }
  loadCondition.close();
  int trackCount = sampleQueues.length;
  TrackGroup[] trackArray = new TrackGroup[trackCount];
  boolean[] trackIsAudioVideoFlags = new boolean[trackCount];
  durationUs = seekMap.getDurationUs();
  for (int i = 0; i < trackCount; i++) {
    Format trackFormat = sampleQueues[i].getUpstreamFormat();
    String mimeType = trackFormat.sampleMimeType;
    boolean isAudio = MimeTypes.isAudio(mimeType);
    boolean isAudioVideo = isAudio || MimeTypes.isVideo(mimeType);
    trackIsAudioVideoFlags[i] = isAudioVideo;
    haveAudioVideoTracks |= isAudioVideo;
    IcyHeaders icyHeaders = this.icyHeaders;
    if (icyHeaders != null) {
      if (isAudio || sampleQueueTrackIds[i].isIcyTrack) {
        // Attach ICY stream metadata to audio tracks and explicit ICY tracks.
        Metadata metadata = trackFormat.metadata;
        trackFormat =
            trackFormat.copyWithMetadata(
                metadata == null
                    ? new Metadata(icyHeaders)
                    : metadata.copyWithAppendedEntries(icyHeaders));
      }
      if (isAudio
          && trackFormat.bitrate == Format.NO_VALUE
          && icyHeaders.bitrate != Format.NO_VALUE) {
        // Fill in a missing audio bitrate from the ICY headers.
        trackFormat = trackFormat.copyWithBitrate(icyHeaders.bitrate);
      }
    }
    trackArray[i] = new TrackGroup(trackFormat);
  }
  // Unknown length and duration implies a live stream.
  isLive = length == C.LENGTH_UNSET && seekMap.getDurationUs() == C.TIME_UNSET;
  dataType = isLive ? C.DATA_TYPE_MEDIA_PROGRESSIVE_LIVE : C.DATA_TYPE_MEDIA;
  preparedState =
      new PreparedState(seekMap, new TrackGroupArray(trackArray), trackIsAudioVideoFlags);
  prepared = true;
  listener.onSourceInfoRefreshed(durationUs, seekMap.isSeekable(), isLive);
  Assertions.checkNotNull(callback).onPrepared(this);
}
Example 16
Source File: HlsSampleStreamWrapper.java From Telegram-FOSS with GNU General Public License v2.0 | 4 votes |
/**
 * Builds tracks that are exposed by this {@link HlsSampleStreamWrapper} instance, as well as
 * internal data-structures required for operation.
 *
 * <p>Tracks in HLS are complicated. A HLS master playlist contains a number of "variants". Each
 * variant stream typically contains muxed video, audio and (possibly) additional audio, metadata
 * and caption tracks. We wish to allow the user to select between an adaptive track that spans
 * all variants, as well as each individual variant. If multiple audio tracks are present within
 * each variant then we wish to allow the user to select between those also.
 *
 * <p>To do this, tracks are constructed as follows. The {@link HlsChunkSource} exposes (N+1)
 * tracks, where N is the number of variants defined in the HLS master playlist. These consist of
 * one adaptive track defined to span all variants and a track for each individual variant. The
 * adaptive track is initially selected. The extractor is then prepared to discover the tracks
 * inside of each variant stream. The two sets of tracks are then combined by this method to
 * create a third set, which is the set exposed by this {@link HlsSampleStreamWrapper}:
 *
 * <ul>
 *   <li>The extractor tracks are inspected to infer a "primary" track type. If a video track is
 *       present then it is always the primary type. If not, audio is the primary type if present.
 *       Else text is the primary type if present. Else there is no primary type.
 *   <li>If there is exactly one extractor track of the primary type, it's expanded into (N+1)
 *       exposed tracks, all of which correspond to the primary extractor track and each of which
 *       corresponds to a different chunk source track. Selecting one of these tracks has the
 *       effect of switching the selected track on the chunk source.
 *   <li>All other extractor tracks are exposed directly. Selecting one of these tracks has the
 *       effect of selecting an extractor track, leaving the selected track on the chunk source
 *       unchanged.
 * </ul>
 */
private void buildTracksFromSampleStreams() {
  // Iterate through the extractor tracks to discover the "primary" track type, and the index
  // of the single track of this type.
  int primaryExtractorTrackType = C.TRACK_TYPE_NONE;
  int primaryExtractorTrackIndex = C.INDEX_UNSET;
  int extractorTrackCount = sampleQueues.length;
  for (int i = 0; i < extractorTrackCount; i++) {
    String sampleMimeType = sampleQueues[i].getUpstreamFormat().sampleMimeType;
    int trackType;
    if (MimeTypes.isVideo(sampleMimeType)) {
      trackType = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      trackType = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(sampleMimeType)) {
      trackType = C.TRACK_TYPE_TEXT;
    } else {
      trackType = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(trackType) > getTrackTypeScore(primaryExtractorTrackType)) {
      // Found a higher-priority track type; it becomes the primary candidate.
      primaryExtractorTrackType = trackType;
      primaryExtractorTrackIndex = i;
    } else if (trackType == primaryExtractorTrackType
        && primaryExtractorTrackIndex != C.INDEX_UNSET) {
      // We have multiple tracks of the primary type. We only want an index if there only exists a
      // single track of the primary type, so unset the index again.
      primaryExtractorTrackIndex = C.INDEX_UNSET;
    }
  }
  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;
  // Instantiate the necessary internal data-structures.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    trackGroupToSampleQueueIndex[i] = i;
  }
  // Construct the set of exposed track groups.
  TrackGroup[] trackGroups = new TrackGroup[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    Format sampleFormat = sampleQueues[i].getUpstreamFormat();
    if (i == primaryExtractorTrackIndex) {
      // Expand the single primary extractor track into one format per chunk source track.
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        formats[0] = sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      trackGroups[i] = new TrackGroup(formats);
      primaryTrackGroupIndex = i;
    } else {
      // Muxed audio alongside primary video may take its manifest format from muxedAudioFormat.
      Format trackFormat =
          primaryExtractorTrackType == C.TRACK_TYPE_VIDEO
                  && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      trackGroups[i] = new TrackGroup(deriveFormat(trackFormat, sampleFormat, false));
    }
  }
  this.trackGroups = new TrackGroupArray(trackGroups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = TrackGroupArray.EMPTY;
}
Example 17
Source File: DashManifestParser.java From Telegram with GNU General Public License v2.0 | 4 votes |
// Builds a Format for a DASH representation: derives the sample MIME type from the container
// type and codecs, computes selection/role flags from the role and accessibility descriptors,
// then dispatches to the matching video/audio/text container-format factory, falling back to
// a generic container format when the sample type is unknown.
protected Format buildFormat(
    String id,
    String containerMimeType,
    int width,
    int height,
    float frameRate,
    int audioChannels,
    int audioSamplingRate,
    int bitrate,
    String language,
    List<Descriptor> roleDescriptors,
    List<Descriptor> accessibilityDescriptors,
    String codecs,
    List<Descriptor> supplementalProperties) {
  String sampleMimeType = getSampleMimeType(containerMimeType, codecs);
  @C.SelectionFlags int selectionFlags = parseSelectionFlagsFromRoleDescriptors(roleDescriptors);
  @C.RoleFlags int roleFlags = parseRoleFlagsFromRoleDescriptors(roleDescriptors);
  // Accessibility descriptors may contribute additional role flags.
  roleFlags |= parseRoleFlagsFromAccessibilityDescriptors(accessibilityDescriptors);
  if (sampleMimeType != null) {
    if (MimeTypes.AUDIO_E_AC3.equals(sampleMimeType)) {
      // E-AC3 may need refinement (e.g. to E-AC3-JOC) based on supplemental properties.
      sampleMimeType = parseEac3SupplementalProperties(supplementalProperties);
    }
    if (MimeTypes.isVideo(sampleMimeType)) {
      return Format.createVideoContainerFormat(
          id, /* label= */ null, containerMimeType, sampleMimeType, codecs,
          /* metadata= */ null, bitrate, width, height, frameRate,
          /* initializationData= */ null, selectionFlags, roleFlags);
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      return Format.createAudioContainerFormat(
          id, /* label= */ null, containerMimeType, sampleMimeType, codecs,
          /* metadata= */ null, bitrate, audioChannels, audioSamplingRate,
          /* initializationData= */ null, selectionFlags, roleFlags, language);
    } else if (mimeTypeIsRawText(sampleMimeType)) {
      int accessibilityChannel;
      if (MimeTypes.APPLICATION_CEA608.equals(sampleMimeType)) {
        accessibilityChannel = parseCea608AccessibilityChannel(accessibilityDescriptors);
      } else if (MimeTypes.APPLICATION_CEA708.equals(sampleMimeType)) {
        accessibilityChannel = parseCea708AccessibilityChannel(accessibilityDescriptors);
      } else {
        // Other raw-text types have no accessibility channel.
        accessibilityChannel = Format.NO_VALUE;
      }
      return Format.createTextContainerFormat(
          id, /* label= */ null, containerMimeType, sampleMimeType, codecs, bitrate,
          selectionFlags, roleFlags, language, accessibilityChannel);
    }
  }
  // Unknown sample type: fall back to a generic container format.
  return Format.createContainerFormat(
      id, /* label= */ null, containerMimeType, sampleMimeType, codecs, bitrate,
      selectionFlags, roleFlags, language);
}
Example 18
Source File: ProgressiveMediaPeriod.java From Telegram-FOSS with GNU General Public License v2.0 | 4 votes |
/**
 * Completes preparation once every sample queue has reported an upstream format and a
 * {@link SeekMap} is available. No-op if already prepared, released, or still waiting on data.
 */
private void maybeFinishPrepare() {
  SeekMap map = this.seekMap;
  if (released || prepared || !sampleQueuesBuilt || map == null) {
    return;
  }
  // Every queue must have seen at least one format before track groups can be built.
  for (SampleQueue queue : sampleQueues) {
    if (queue.getUpstreamFormat() == null) {
      return;
    }
  }
  loadCondition.close();
  int numTracks = sampleQueues.length;
  TrackGroup[] groups = new TrackGroup[numTracks];
  boolean[] isAudioVideoFlags = new boolean[numTracks];
  durationUs = map.getDurationUs();
  for (int i = 0; i < numTracks; i++) {
    Format format = sampleQueues[i].getUpstreamFormat();
    String mimeType = format.sampleMimeType;
    boolean isAudio = MimeTypes.isAudio(mimeType);
    boolean isAudioVideo = isAudio || MimeTypes.isVideo(mimeType);
    isAudioVideoFlags[i] = isAudioVideo;
    haveAudioVideoTracks |= isAudioVideo;
    IcyHeaders icy = this.icyHeaders;
    if (icy != null) {
      // Attach ICY metadata to audio tracks and to tracks explicitly flagged as ICY.
      if (isAudio || sampleQueueTrackIds[i].isIcyTrack) {
        Metadata metadata = format.metadata;
        format =
            format.copyWithMetadata(
                metadata == null ? new Metadata(icy) : metadata.copyWithAppendedEntries(icy));
      }
      // Fall back to the ICY-advertised bitrate when the stream itself reports none.
      if (isAudio && format.bitrate == Format.NO_VALUE && icy.bitrate != Format.NO_VALUE) {
        format = format.copyWithBitrate(icy.bitrate);
      }
    }
    groups[i] = new TrackGroup(format);
  }
  // Unknown length combined with unknown duration is treated as live progressive media.
  dataType =
      length == C.LENGTH_UNSET && map.getDurationUs() == C.TIME_UNSET
          ? C.DATA_TYPE_MEDIA_PROGRESSIVE_LIVE
          : C.DATA_TYPE_MEDIA;
  preparedState = new PreparedState(map, new TrackGroupArray(groups), isAudioVideoFlags);
  prepared = true;
  listener.onSourceInfoRefreshed(durationUs, map.isSeekable());
  Assertions.checkNotNull(callback).onPrepared(this);
}
Example 19
Source File: HlsSampleStreamWrapper.java From TelePlus-Android with GNU General Public License v2.0 | 4 votes |
/**
 * Builds the track groups exposed by this {@link HlsSampleStreamWrapper}, together with the
 * internal mappings needed to operate on them.
 *
 * <p>A HLS master playlist defines N variants, each typically muxing video, audio and possibly
 * further audio/metadata/caption tracks. The {@link HlsChunkSource} exposes (N+1) tracks: one
 * adaptive track spanning all variants (initially selected) plus one track per variant. After the
 * extractor discovers the tracks inside the variant streams, this method combines the two sets:
 *
 * <ul>
 *   <li>A "primary" extractor track type is inferred: video if present, else audio, else text,
 *       else none.
 *   <li>If exactly one extractor track has the primary type, it is expanded into (N+1) exposed
 *       tracks, one per chunk source track; selecting one switches the chunk source's selection.
 *   <li>Every other extractor track is exposed directly; selecting it leaves the chunk source
 *       selection unchanged.
 * </ul>
 */
private void buildTracksFromSampleStreams() {
  // Infer the "primary" track type and, when unique, the index of its single extractor track.
  int primaryType = C.TRACK_TYPE_NONE;
  int primaryIndex = C.INDEX_UNSET;
  int queueCount = sampleQueues.length;
  for (int i = 0; i < queueCount; i++) {
    String mimeType = sampleQueues[i].getUpstreamFormat().sampleMimeType;
    int type;
    if (MimeTypes.isVideo(mimeType)) {
      type = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(mimeType)) {
      type = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(mimeType)) {
      type = C.TRACK_TYPE_TEXT;
    } else {
      type = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(type) > getTrackTypeScore(primaryType)) {
      primaryType = type;
      primaryIndex = i;
    } else if (type == primaryType && primaryIndex != C.INDEX_UNSET) {
      // A second track of the primary type: the index is only meaningful when unique, so unset.
      primaryIndex = C.INDEX_UNSET;
    }
  }
  TrackGroup chunkSourceGroup = chunkSource.getTrackGroup();
  int chunkSourceCount = chunkSourceGroup.length;
  // Set up the internal mapping structures.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[queueCount];
  for (int i = 0; i < queueCount; i++) {
    trackGroupToSampleQueueIndex[i] = i;
  }
  // Build the exposed track groups.
  TrackGroup[] groups = new TrackGroup[queueCount];
  for (int i = 0; i < queueCount; i++) {
    Format sampleFormat = sampleQueues[i].getUpstreamFormat();
    if (i == primaryIndex) {
      // Expand the primary extractor track into one format per chunk source track.
      Format[] formats = new Format[chunkSourceCount];
      if (chunkSourceCount == 1) {
        formats[0] = sampleFormat.copyWithManifestFormatInfo(chunkSourceGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceCount; j++) {
          formats[j] = deriveFormat(chunkSourceGroup.getFormat(j), sampleFormat, true);
        }
      }
      groups[i] = new TrackGroup(formats);
      primaryTrackGroupIndex = i;
    } else {
      // Muxed audio alongside primary video can borrow the manifest's muxed audio format.
      Format manifestFormat =
          primaryType == C.TRACK_TYPE_VIDEO && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      groups[i] = new TrackGroup(deriveFormat(manifestFormat, sampleFormat, false));
    }
  }
  this.trackGroups = new TrackGroupArray(groups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = TrackGroupArray.EMPTY;
}
Example 20
Source File: HlsSampleStreamWrapper.java From MediaSDK with Apache License 2.0 | 4 votes |
/**
 * Builds the track groups exposed by this {@link HlsSampleStreamWrapper}, together with the
 * internal mappings needed to operate on them.
 *
 * <p>A HLS master playlist defines N variants, each typically muxing video, audio and possibly
 * further audio/metadata/caption tracks. The {@link HlsChunkSource} exposes (N+1) tracks: one
 * adaptive track spanning all variants (initially selected) plus one track per variant. After the
 * extractor discovers the tracks inside the variant streams, this method combines the two sets:
 *
 * <ul>
 *   <li>A "primary" extractor track type is inferred: video if present, else audio, else text,
 *       else none.
 *   <li>If exactly one extractor track has the primary type, it is expanded into (N+1) exposed
 *       tracks, one per chunk source track; selecting one switches the chunk source's selection.
 *   <li>Every other extractor track is exposed directly; selecting it leaves the chunk source
 *       selection unchanged.
 * </ul>
 */
@EnsuresNonNull({"trackGroups", "optionalTrackGroups", "trackGroupToSampleQueueIndex"})
private void buildTracksFromSampleStreams() {
  // Infer the "primary" track type and, when unique, the index of its single extractor track.
  int primaryType = C.TRACK_TYPE_NONE;
  int primaryIndex = C.INDEX_UNSET;
  int queueCount = sampleQueues.length;
  for (int i = 0; i < queueCount; i++) {
    String mimeType = sampleQueues[i].getUpstreamFormat().sampleMimeType;
    int type;
    if (MimeTypes.isVideo(mimeType)) {
      type = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(mimeType)) {
      type = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(mimeType)) {
      type = C.TRACK_TYPE_TEXT;
    } else {
      type = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(type) > getTrackTypeScore(primaryType)) {
      primaryType = type;
      primaryIndex = i;
    } else if (type == primaryType && primaryIndex != C.INDEX_UNSET) {
      // A second track of the primary type: the index is only meaningful when unique, so unset.
      primaryIndex = C.INDEX_UNSET;
    }
  }
  TrackGroup chunkSourceGroup = chunkSource.getTrackGroup();
  int chunkSourceCount = chunkSourceGroup.length;
  // Set up the internal mapping structures.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[queueCount];
  for (int i = 0; i < queueCount; i++) {
    trackGroupToSampleQueueIndex[i] = i;
  }
  // Build the exposed track groups.
  TrackGroup[] groups = new TrackGroup[queueCount];
  for (int i = 0; i < queueCount; i++) {
    Format sampleFormat = sampleQueues[i].getUpstreamFormat();
    if (i == primaryIndex) {
      // Expand the primary extractor track into one format per chunk source track.
      Format[] formats = new Format[chunkSourceCount];
      if (chunkSourceCount == 1) {
        formats[0] = sampleFormat.copyWithManifestFormatInfo(chunkSourceGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceCount; j++) {
          formats[j] = deriveFormat(chunkSourceGroup.getFormat(j), sampleFormat, true);
        }
      }
      groups[i] = new TrackGroup(formats);
      primaryTrackGroupIndex = i;
    } else {
      // Muxed audio alongside primary video can borrow the manifest's muxed audio format.
      Format manifestFormat =
          primaryType == C.TRACK_TYPE_VIDEO && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      groups[i] = new TrackGroup(deriveFormat(manifestFormat, sampleFormat, false));
    }
  }
  this.trackGroups = createTrackGroupArrayWithDrmInfo(groups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = Collections.emptySet();
}