Java Code Examples for com.google.android.exoplayer2.C#TRACK_TYPE_NONE
The following examples show how to use
com.google.android.exoplayer2.C#TRACK_TYPE_NONE.
You can vote up the examples you like or vote down the ones you don't like.
You can go to the original project or source file by following the links above each example, and you may check out the related API usage on the sidebar.
Example 1
Source File: Util.java From MediaSDK with Apache License 2.0 | 6 votes |
/**
 * Returns a string representation of a {@code TRACK_TYPE_*} constant defined in {@link C}.
 *
 * @param trackType A {@code TRACK_TYPE_*} constant.
 * @return A string representation of this constant.
 */
public static String getTrackTypeString(int trackType) {
  switch (trackType) {
    case C.TRACK_TYPE_DEFAULT:
      return "default";
    case C.TRACK_TYPE_AUDIO:
      return "audio";
    case C.TRACK_TYPE_VIDEO:
      return "video";
    case C.TRACK_TYPE_TEXT:
      return "text";
    case C.TRACK_TYPE_METADATA:
      return "metadata";
    case C.TRACK_TYPE_CAMERA_MOTION:
      return "camera motion";
    case C.TRACK_TYPE_NONE:
      return "none";
    default:
      // Application-defined track types live at or above TRACK_TYPE_CUSTOM_BASE.
      if (trackType >= C.TRACK_TYPE_CUSTOM_BASE) {
        return "custom (" + trackType + ")";
      }
      return "?";
  }
}
Example 2
Source File: EventLogger.java From TelePlus-Android with GNU General Public License v2.0 | 6 votes |
/**
 * Returns a human-readable name for a {@code TRACK_TYPE_*} constant defined in {@link C},
 * for use in log output.
 *
 * @param trackType A {@code TRACK_TYPE_*} constant.
 * @return The type's name, {@code "custom (n)"} for custom types, or {@code "?"} if unknown.
 */
private static String getTrackTypeString(int trackType) {
  switch (trackType) {
    case C.TRACK_TYPE_AUDIO:
      return "audio";
    case C.TRACK_TYPE_DEFAULT:
      return "default";
    case C.TRACK_TYPE_METADATA:
      return "metadata";
    // Previously missing: camera motion tracks fell through and logged as "?". Added for
    // parity with the other ExoPlayer event loggers.
    case C.TRACK_TYPE_CAMERA_MOTION:
      return "camera motion";
    case C.TRACK_TYPE_NONE:
      return "none";
    case C.TRACK_TYPE_TEXT:
      return "text";
    case C.TRACK_TYPE_VIDEO:
      return "video";
    default:
      return trackType >= C.TRACK_TYPE_CUSTOM_BASE ? "custom (" + trackType + ")" : "?";
  }
}
Example 3
Source File: EventLogger.java From TelePlus-Android with GNU General Public License v2.0 | 6 votes |
/**
 * Returns a human-readable name for a {@code TRACK_TYPE_*} constant defined in {@link C},
 * for use in log output.
 *
 * @param trackType A {@code TRACK_TYPE_*} constant.
 * @return The type's name, {@code "custom (n)"} for custom types, or {@code "?"} if unknown.
 */
private static String getTrackTypeString(int trackType) {
  switch (trackType) {
    case C.TRACK_TYPE_AUDIO:
      return "audio";
    case C.TRACK_TYPE_DEFAULT:
      return "default";
    case C.TRACK_TYPE_METADATA:
      return "metadata";
    // Previously missing: camera motion tracks fell through and logged as "?". Added for
    // parity with the other ExoPlayer event loggers.
    case C.TRACK_TYPE_CAMERA_MOTION:
      return "camera motion";
    case C.TRACK_TYPE_NONE:
      return "none";
    case C.TRACK_TYPE_TEXT:
      return "text";
    case C.TRACK_TYPE_VIDEO:
      return "video";
    default:
      return trackType >= C.TRACK_TYPE_CUSTOM_BASE ? "custom (" + trackType + ")" : "?";
  }
}
Example 4
Source File: EventLogger.java From Telegram-FOSS with GNU General Public License v2.0 | 6 votes |
/** Maps a {@code C.TRACK_TYPE_*} constant to a short human-readable label for log output. */
private static String getTrackTypeString(int trackType) {
  switch (trackType) {
    case C.TRACK_TYPE_DEFAULT:
      return "default";
    case C.TRACK_TYPE_AUDIO:
      return "audio";
    case C.TRACK_TYPE_VIDEO:
      return "video";
    case C.TRACK_TYPE_TEXT:
      return "text";
    case C.TRACK_TYPE_METADATA:
      return "metadata";
    case C.TRACK_TYPE_CAMERA_MOTION:
      return "camera motion";
    case C.TRACK_TYPE_NONE:
      return "none";
    default:
      // Application-defined track types live at or above TRACK_TYPE_CUSTOM_BASE.
      if (trackType >= C.TRACK_TYPE_CUSTOM_BASE) {
        return "custom (" + trackType + ")";
      }
      return "?";
  }
}
Example 5
Source File: EventLogger.java From Telegram with GNU General Public License v2.0 | 6 votes |
/** Maps a {@code C.TRACK_TYPE_*} constant to a short human-readable label for log output. */
private static String getTrackTypeString(int trackType) {
  switch (trackType) {
    case C.TRACK_TYPE_DEFAULT:
      return "default";
    case C.TRACK_TYPE_AUDIO:
      return "audio";
    case C.TRACK_TYPE_VIDEO:
      return "video";
    case C.TRACK_TYPE_TEXT:
      return "text";
    case C.TRACK_TYPE_METADATA:
      return "metadata";
    case C.TRACK_TYPE_CAMERA_MOTION:
      return "camera motion";
    case C.TRACK_TYPE_NONE:
      return "none";
    default:
      // Application-defined track types live at or above TRACK_TYPE_CUSTOM_BASE.
      if (trackType >= C.TRACK_TYPE_CUSTOM_BASE) {
        return "custom (" + trackType + ")";
      }
      return "?";
  }
}
Example 6
Source File: DefaultTrackSelector.java From MediaSDK with Apache License 2.0 | 4 votes |
/**
 * Performs the track selection for all renderers: an initial automatic selection, then any
 * user-specified renderer disabling and selection overrides, and finally the per-renderer
 * configurations (including tunneling where applicable).
 */
@Override
protected final Pair<@NullableType RendererConfiguration[], @NullableType TrackSelection[]>
    selectTracks(
        MappedTrackInfo mappedTrackInfo,
        @Capabilities int[][][] rendererFormatSupports,
        @AdaptiveSupport int[] rendererMixedMimeTypeAdaptationSupports)
        throws ExoPlaybackException {
  Parameters parameters = parametersReference.get();
  int rendererCount = mappedTrackInfo.getRendererCount();
  TrackSelection.@NullableType Definition[] definitions =
      selectAllTracks(
          mappedTrackInfo,
          rendererFormatSupports,
          rendererMixedMimeTypeAdaptationSupports,
          parameters);

  // Replace the automatic selection wherever a renderer is disabled or an override applies.
  for (int rendererIndex = 0; rendererIndex < rendererCount; rendererIndex++) {
    if (parameters.getRendererDisabled(rendererIndex)) {
      definitions[rendererIndex] = null;
      continue;
    }
    TrackGroupArray rendererTrackGroups = mappedTrackInfo.getTrackGroups(rendererIndex);
    if (parameters.hasSelectionOverride(rendererIndex, rendererTrackGroups)) {
      SelectionOverride override =
          parameters.getSelectionOverride(rendererIndex, rendererTrackGroups);
      if (override == null) {
        definitions[rendererIndex] = null;
      } else {
        definitions[rendererIndex] =
            new TrackSelection.Definition(
                rendererTrackGroups.get(override.groupIndex),
                override.tracks,
                override.reason,
                override.data);
      }
    }
  }

  @NullableType
  TrackSelection[] rendererTrackSelections =
      trackSelectionFactory.createTrackSelections(definitions, getBandwidthMeter());

  // A renderer gets the default configuration when it has a selection, or when it reports no
  // track type at all (TRACK_TYPE_NONE); otherwise it stays unconfigured (null).
  @NullableType
  RendererConfiguration[] rendererConfigurations = new RendererConfiguration[rendererCount];
  for (int rendererIndex = 0; rendererIndex < rendererCount; rendererIndex++) {
    boolean rendererEnabled =
        !parameters.getRendererDisabled(rendererIndex)
            && (mappedTrackInfo.getRendererType(rendererIndex) == C.TRACK_TYPE_NONE
                || rendererTrackSelections[rendererIndex] != null);
    rendererConfigurations[rendererIndex] =
        rendererEnabled ? RendererConfiguration.DEFAULT : null;
  }

  // Audio and video renderers may be reconfigured for tunneled playback if appropriate.
  maybeConfigureRenderersForTunneling(
      mappedTrackInfo,
      rendererFormatSupports,
      rendererConfigurations,
      rendererTrackSelections,
      parameters.tunnelingAudioSessionId);

  return Pair.create(rendererConfigurations, rendererTrackSelections);
}
Example 7
Source File: HlsSampleStreamWrapper.java From MediaSDK with Apache License 2.0 | 4 votes |
/**
 * Builds the track groups exposed by this {@link HlsSampleStreamWrapper}, together with the
 * internal data structures needed to operate on them.
 *
 * <p>Tracks in HLS are complicated. An HLS master playlist defines a number of "variants", each
 * typically muxing video, audio and possibly additional audio, metadata and caption tracks. The
 * user should be able to select an adaptive track spanning all variants, each individual
 * variant, and — where a variant carries multiple audio tracks — between those too.
 *
 * <p>To that end, the {@link HlsChunkSource} exposes (N+1) tracks, where N is the number of
 * variants in the master playlist: one adaptive track spanning all variants (initially
 * selected) plus one track per variant. After the extractor has discovered the tracks inside
 * the variant streams, the two sets are combined here into the set this wrapper exposes:
 *
 * <ul>
 *   <li>A "primary" extractor track type is inferred: video if present, else audio if present,
 *       else text if present, else none.
 *   <li>If exactly one extractor track has the primary type, it is expanded into (N+1) exposed
 *       tracks, one per chunk source track. Selecting one of these switches the selected track
 *       on the chunk source.
 *   <li>Every other extractor track is exposed directly; selecting one leaves the chunk source
 *       selection unchanged.
 * </ul>
 */
@EnsuresNonNull({"trackGroups", "optionalTrackGroups", "trackGroupToSampleQueueIndex"})
private void buildTracksFromSampleStreams() {
  // Infer the primary track type and, when it is unique, the index of its single track.
  int primaryExtractorTrackType = C.TRACK_TYPE_NONE;
  int primaryExtractorTrackIndex = C.INDEX_UNSET;
  int extractorTrackCount = sampleQueues.length;
  for (int queueIndex = 0; queueIndex < extractorTrackCount; queueIndex++) {
    String sampleMimeType = sampleQueues[queueIndex].getUpstreamFormat().sampleMimeType;
    int trackType;
    if (MimeTypes.isVideo(sampleMimeType)) {
      trackType = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      trackType = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(sampleMimeType)) {
      trackType = C.TRACK_TYPE_TEXT;
    } else {
      trackType = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(trackType) > getTrackTypeScore(primaryExtractorTrackType)) {
      primaryExtractorTrackType = trackType;
      primaryExtractorTrackIndex = queueIndex;
    } else if (trackType == primaryExtractorTrackType
        && primaryExtractorTrackIndex != C.INDEX_UNSET) {
      // A second track of the primary type: the index is only kept when unique, so unset it.
      primaryExtractorTrackIndex = C.INDEX_UNSET;
    }
  }

  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;

  // Prepare internal bookkeeping. Initially each exposed group maps to the sample queue of the
  // same index.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[extractorTrackCount];
  for (int queueIndex = 0; queueIndex < extractorTrackCount; queueIndex++) {
    trackGroupToSampleQueueIndex[queueIndex] = queueIndex;
  }

  // Build the exposed track groups.
  TrackGroup[] trackGroups = new TrackGroup[extractorTrackCount];
  for (int queueIndex = 0; queueIndex < extractorTrackCount; queueIndex++) {
    Format sampleFormat = sampleQueues[queueIndex].getUpstreamFormat();
    if (queueIndex == primaryExtractorTrackIndex) {
      // Expand the unique primary track into one format per chunk source track.
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        formats[0] =
            sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      trackGroups[queueIndex] = new TrackGroup(formats);
      primaryTrackGroupIndex = queueIndex;
    } else {
      // Muxed audio alongside a primary video track inherits the declared muxed audio format.
      Format trackFormat =
          primaryExtractorTrackType == C.TRACK_TYPE_VIDEO
                  && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      trackGroups[queueIndex] = new TrackGroup(deriveFormat(trackFormat, sampleFormat, false));
    }
  }
  this.trackGroups = createTrackGroupArrayWithDrmInfo(trackGroups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = Collections.emptySet();
}
Example 8
Source File: DefaultTrackSelector.java From TelePlus-Android with GNU General Public License v2.0 | 4 votes |
/**
 * Performs the track selection for all renderers: an initial automatic selection, then any
 * user-specified renderer disabling and selection overrides, and finally the per-renderer
 * configurations (including tunneling where applicable).
 */
@Override
protected final Pair<@NullableType RendererConfiguration[], @NullableType TrackSelection[]>
    selectTracks(
        MappedTrackInfo mappedTrackInfo,
        int[][][] rendererFormatSupports,
        int[] rendererMixedMimeTypeAdaptationSupports)
        throws ExoPlaybackException {
  Parameters parameters = parametersReference.get();
  int rendererCount = mappedTrackInfo.getRendererCount();
  @NullableType
  TrackSelection[] rendererTrackSelections =
      selectAllTracks(
          mappedTrackInfo,
          rendererFormatSupports,
          rendererMixedMimeTypeAdaptationSupports,
          parameters);

  // Replace the automatic selection wherever a renderer is disabled or an override applies.
  for (int rendererIndex = 0; rendererIndex < rendererCount; rendererIndex++) {
    if (parameters.getRendererDisabled(rendererIndex)) {
      rendererTrackSelections[rendererIndex] = null;
      continue;
    }
    TrackGroupArray rendererTrackGroups = mappedTrackInfo.getTrackGroups(rendererIndex);
    if (!parameters.hasSelectionOverride(rendererIndex, rendererTrackGroups)) {
      continue;
    }
    SelectionOverride override =
        parameters.getSelectionOverride(rendererIndex, rendererTrackGroups);
    if (override == null) {
      rendererTrackSelections[rendererIndex] = null;
    } else if (override.length == 1) {
      // A single-track override is a fixed selection.
      rendererTrackSelections[rendererIndex] =
          new FixedTrackSelection(
              rendererTrackGroups.get(override.groupIndex), override.tracks[0]);
    } else {
      // Multi-track overrides require the adaptive factory to have been supplied.
      rendererTrackSelections[rendererIndex] =
          Assertions.checkNotNull(adaptiveTrackSelectionFactory)
              .createTrackSelection(
                  rendererTrackGroups.get(override.groupIndex),
                  getBandwidthMeter(),
                  override.tracks);
    }
  }

  // A renderer gets the default configuration when it has a selection, or when it reports no
  // track type at all (TRACK_TYPE_NONE); otherwise it stays unconfigured (null).
  @NullableType
  RendererConfiguration[] rendererConfigurations = new RendererConfiguration[rendererCount];
  for (int rendererIndex = 0; rendererIndex < rendererCount; rendererIndex++) {
    boolean rendererEnabled =
        !parameters.getRendererDisabled(rendererIndex)
            && (mappedTrackInfo.getRendererType(rendererIndex) == C.TRACK_TYPE_NONE
                || rendererTrackSelections[rendererIndex] != null);
    rendererConfigurations[rendererIndex] =
        rendererEnabled ? RendererConfiguration.DEFAULT : null;
  }

  // Audio and video renderers may be reconfigured for tunneled playback if appropriate.
  maybeConfigureRenderersForTunneling(
      mappedTrackInfo,
      rendererFormatSupports,
      rendererConfigurations,
      rendererTrackSelections,
      parameters.tunnelingAudioSessionId);

  return Pair.create(rendererConfigurations, rendererTrackSelections);
}
Example 9
Source File: HlsSampleStreamWrapper.java From TelePlus-Android with GNU General Public License v2.0 | 4 votes |
/**
 * Builds the track groups exposed by this {@link HlsSampleStreamWrapper}, together with the
 * internal data structures needed to operate on them.
 *
 * <p>Tracks in HLS are complicated. An HLS master playlist defines a number of "variants", each
 * typically muxing video, audio and possibly additional audio, metadata and caption tracks. The
 * user should be able to select an adaptive track spanning all variants, each individual
 * variant, and — where a variant carries multiple audio tracks — between those too.
 *
 * <p>To that end, the {@link HlsChunkSource} exposes (N+1) tracks, where N is the number of
 * variants in the master playlist: one adaptive track spanning all variants (initially
 * selected) plus one track per variant. After the extractor has discovered the tracks inside
 * the variant streams, the two sets are combined here into the set this wrapper exposes:
 *
 * <ul>
 *   <li>A "primary" extractor track type is inferred: video if present, else audio if present,
 *       else text if present, else none.
 *   <li>If exactly one extractor track has the primary type, it is expanded into (N+1) exposed
 *       tracks, one per chunk source track. Selecting one of these switches the selected track
 *       on the chunk source.
 *   <li>Every other extractor track is exposed directly; selecting one leaves the chunk source
 *       selection unchanged.
 * </ul>
 */
private void buildTracksFromSampleStreams() {
  // Infer the primary track type and, when it is unique, the index of its single track.
  int primaryExtractorTrackType = C.TRACK_TYPE_NONE;
  int primaryExtractorTrackIndex = C.INDEX_UNSET;
  int extractorTrackCount = sampleQueues.length;
  for (int queueIndex = 0; queueIndex < extractorTrackCount; queueIndex++) {
    String sampleMimeType = sampleQueues[queueIndex].getUpstreamFormat().sampleMimeType;
    int trackType;
    if (MimeTypes.isVideo(sampleMimeType)) {
      trackType = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      trackType = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(sampleMimeType)) {
      trackType = C.TRACK_TYPE_TEXT;
    } else {
      trackType = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(trackType) > getTrackTypeScore(primaryExtractorTrackType)) {
      primaryExtractorTrackType = trackType;
      primaryExtractorTrackIndex = queueIndex;
    } else if (trackType == primaryExtractorTrackType
        && primaryExtractorTrackIndex != C.INDEX_UNSET) {
      // A second track of the primary type: the index is only kept when unique, so unset it.
      primaryExtractorTrackIndex = C.INDEX_UNSET;
    }
  }

  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;

  // Prepare internal bookkeeping. Initially each exposed group maps to the sample queue of the
  // same index.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[extractorTrackCount];
  for (int queueIndex = 0; queueIndex < extractorTrackCount; queueIndex++) {
    trackGroupToSampleQueueIndex[queueIndex] = queueIndex;
  }

  // Build the exposed track groups.
  TrackGroup[] trackGroups = new TrackGroup[extractorTrackCount];
  for (int queueIndex = 0; queueIndex < extractorTrackCount; queueIndex++) {
    Format sampleFormat = sampleQueues[queueIndex].getUpstreamFormat();
    if (queueIndex == primaryExtractorTrackIndex) {
      // Expand the unique primary track into one format per chunk source track.
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        formats[0] =
            sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      trackGroups[queueIndex] = new TrackGroup(formats);
      primaryTrackGroupIndex = queueIndex;
    } else {
      // Muxed audio alongside a primary video track inherits the declared muxed audio format.
      Format trackFormat =
          primaryExtractorTrackType == C.TRACK_TYPE_VIDEO
                  && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      trackGroups[queueIndex] = new TrackGroup(deriveFormat(trackFormat, sampleFormat, false));
    }
  }
  this.trackGroups = new TrackGroupArray(trackGroups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = TrackGroupArray.EMPTY;
}
Example 10
Source File: DefaultTrackSelector.java From TelePlus-Android with GNU General Public License v2.0 | 4 votes |
/**
 * Performs the track selection for all renderers: an initial automatic selection, then any
 * user-specified renderer disabling and selection overrides, and finally the per-renderer
 * configurations (including tunneling where applicable).
 */
@Override
protected final Pair<@NullableType RendererConfiguration[], @NullableType TrackSelection[]>
    selectTracks(
        MappedTrackInfo mappedTrackInfo,
        int[][][] rendererFormatSupports,
        int[] rendererMixedMimeTypeAdaptationSupports)
        throws ExoPlaybackException {
  Parameters parameters = parametersReference.get();
  int rendererCount = mappedTrackInfo.getRendererCount();
  @NullableType
  TrackSelection[] rendererTrackSelections =
      selectAllTracks(
          mappedTrackInfo,
          rendererFormatSupports,
          rendererMixedMimeTypeAdaptationSupports,
          parameters);

  // Replace the automatic selection wherever a renderer is disabled or an override applies.
  for (int rendererIndex = 0; rendererIndex < rendererCount; rendererIndex++) {
    if (parameters.getRendererDisabled(rendererIndex)) {
      rendererTrackSelections[rendererIndex] = null;
      continue;
    }
    TrackGroupArray rendererTrackGroups = mappedTrackInfo.getTrackGroups(rendererIndex);
    if (!parameters.hasSelectionOverride(rendererIndex, rendererTrackGroups)) {
      continue;
    }
    SelectionOverride override =
        parameters.getSelectionOverride(rendererIndex, rendererTrackGroups);
    if (override == null) {
      rendererTrackSelections[rendererIndex] = null;
    } else if (override.length == 1) {
      // A single-track override is a fixed selection.
      rendererTrackSelections[rendererIndex] =
          new FixedTrackSelection(
              rendererTrackGroups.get(override.groupIndex), override.tracks[0]);
    } else {
      // Multi-track overrides require the adaptive factory to have been supplied.
      rendererTrackSelections[rendererIndex] =
          Assertions.checkNotNull(adaptiveTrackSelectionFactory)
              .createTrackSelection(
                  rendererTrackGroups.get(override.groupIndex),
                  getBandwidthMeter(),
                  override.tracks);
    }
  }

  // A renderer gets the default configuration when it has a selection, or when it reports no
  // track type at all (TRACK_TYPE_NONE); otherwise it stays unconfigured (null).
  @NullableType
  RendererConfiguration[] rendererConfigurations = new RendererConfiguration[rendererCount];
  for (int rendererIndex = 0; rendererIndex < rendererCount; rendererIndex++) {
    boolean rendererEnabled =
        !parameters.getRendererDisabled(rendererIndex)
            && (mappedTrackInfo.getRendererType(rendererIndex) == C.TRACK_TYPE_NONE
                || rendererTrackSelections[rendererIndex] != null);
    rendererConfigurations[rendererIndex] =
        rendererEnabled ? RendererConfiguration.DEFAULT : null;
  }

  // Audio and video renderers may be reconfigured for tunneled playback if appropriate.
  maybeConfigureRenderersForTunneling(
      mappedTrackInfo,
      rendererFormatSupports,
      rendererConfigurations,
      rendererTrackSelections,
      parameters.tunnelingAudioSessionId);

  return Pair.create(rendererConfigurations, rendererTrackSelections);
}
Example 11
Source File: HlsSampleStreamWrapper.java From TelePlus-Android with GNU General Public License v2.0 | 4 votes |
/**
 * Builds the track groups exposed by this {@link HlsSampleStreamWrapper}, together with the
 * internal data structures needed to operate on them.
 *
 * <p>Tracks in HLS are complicated. An HLS master playlist defines a number of "variants", each
 * typically muxing video, audio and possibly additional audio, metadata and caption tracks. The
 * user should be able to select an adaptive track spanning all variants, each individual
 * variant, and — where a variant carries multiple audio tracks — between those too.
 *
 * <p>To that end, the {@link HlsChunkSource} exposes (N+1) tracks, where N is the number of
 * variants in the master playlist: one adaptive track spanning all variants (initially
 * selected) plus one track per variant. After the extractor has discovered the tracks inside
 * the variant streams, the two sets are combined here into the set this wrapper exposes:
 *
 * <ul>
 *   <li>A "primary" extractor track type is inferred: video if present, else audio if present,
 *       else text if present, else none.
 *   <li>If exactly one extractor track has the primary type, it is expanded into (N+1) exposed
 *       tracks, one per chunk source track. Selecting one of these switches the selected track
 *       on the chunk source.
 *   <li>Every other extractor track is exposed directly; selecting one leaves the chunk source
 *       selection unchanged.
 * </ul>
 */
private void buildTracksFromSampleStreams() {
  // Infer the primary track type and, when it is unique, the index of its single track.
  int primaryExtractorTrackType = C.TRACK_TYPE_NONE;
  int primaryExtractorTrackIndex = C.INDEX_UNSET;
  int extractorTrackCount = sampleQueues.length;
  for (int queueIndex = 0; queueIndex < extractorTrackCount; queueIndex++) {
    String sampleMimeType = sampleQueues[queueIndex].getUpstreamFormat().sampleMimeType;
    int trackType;
    if (MimeTypes.isVideo(sampleMimeType)) {
      trackType = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      trackType = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(sampleMimeType)) {
      trackType = C.TRACK_TYPE_TEXT;
    } else {
      trackType = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(trackType) > getTrackTypeScore(primaryExtractorTrackType)) {
      primaryExtractorTrackType = trackType;
      primaryExtractorTrackIndex = queueIndex;
    } else if (trackType == primaryExtractorTrackType
        && primaryExtractorTrackIndex != C.INDEX_UNSET) {
      // A second track of the primary type: the index is only kept when unique, so unset it.
      primaryExtractorTrackIndex = C.INDEX_UNSET;
    }
  }

  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;

  // Prepare internal bookkeeping. Initially each exposed group maps to the sample queue of the
  // same index.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[extractorTrackCount];
  for (int queueIndex = 0; queueIndex < extractorTrackCount; queueIndex++) {
    trackGroupToSampleQueueIndex[queueIndex] = queueIndex;
  }

  // Build the exposed track groups.
  TrackGroup[] trackGroups = new TrackGroup[extractorTrackCount];
  for (int queueIndex = 0; queueIndex < extractorTrackCount; queueIndex++) {
    Format sampleFormat = sampleQueues[queueIndex].getUpstreamFormat();
    if (queueIndex == primaryExtractorTrackIndex) {
      // Expand the unique primary track into one format per chunk source track.
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        formats[0] =
            sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      trackGroups[queueIndex] = new TrackGroup(formats);
      primaryTrackGroupIndex = queueIndex;
    } else {
      // Muxed audio alongside a primary video track inherits the declared muxed audio format.
      Format trackFormat =
          primaryExtractorTrackType == C.TRACK_TYPE_VIDEO
                  && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      trackGroups[queueIndex] = new TrackGroup(deriveFormat(trackFormat, sampleFormat, false));
    }
  }
  this.trackGroups = new TrackGroupArray(trackGroups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = TrackGroupArray.EMPTY;
}
Example 12
Source File: DefaultTrackSelector.java From Telegram-FOSS with GNU General Public License v2.0 | 4 votes |
/**
 * Performs the track selection for all renderers: an initial automatic selection, then any
 * user-specified renderer disabling and selection overrides, and finally the per-renderer
 * configurations (including tunneling where applicable).
 */
@Override
protected final Pair<@NullableType RendererConfiguration[], @NullableType TrackSelection[]>
    selectTracks(
        MappedTrackInfo mappedTrackInfo,
        int[][][] rendererFormatSupports,
        int[] rendererMixedMimeTypeAdaptationSupports)
        throws ExoPlaybackException {
  Parameters parameters = parametersReference.get();
  int rendererCount = mappedTrackInfo.getRendererCount();
  TrackSelection.@NullableType Definition[] definitions =
      selectAllTracks(
          mappedTrackInfo,
          rendererFormatSupports,
          rendererMixedMimeTypeAdaptationSupports,
          parameters);

  // Replace the automatic selection wherever a renderer is disabled or an override applies.
  for (int rendererIndex = 0; rendererIndex < rendererCount; rendererIndex++) {
    if (parameters.getRendererDisabled(rendererIndex)) {
      definitions[rendererIndex] = null;
      continue;
    }
    TrackGroupArray rendererTrackGroups = mappedTrackInfo.getTrackGroups(rendererIndex);
    if (parameters.hasSelectionOverride(rendererIndex, rendererTrackGroups)) {
      SelectionOverride override =
          parameters.getSelectionOverride(rendererIndex, rendererTrackGroups);
      if (override == null) {
        definitions[rendererIndex] = null;
      } else {
        definitions[rendererIndex] =
            new TrackSelection.Definition(
                rendererTrackGroups.get(override.groupIndex),
                override.tracks,
                override.reason,
                override.data);
      }
    }
  }

  @NullableType
  TrackSelection[] rendererTrackSelections =
      trackSelectionFactory.createTrackSelections(definitions, getBandwidthMeter());

  // A renderer gets the default configuration when it has a selection, or when it reports no
  // track type at all (TRACK_TYPE_NONE); otherwise it stays unconfigured (null).
  @NullableType
  RendererConfiguration[] rendererConfigurations = new RendererConfiguration[rendererCount];
  for (int rendererIndex = 0; rendererIndex < rendererCount; rendererIndex++) {
    boolean rendererEnabled =
        !parameters.getRendererDisabled(rendererIndex)
            && (mappedTrackInfo.getRendererType(rendererIndex) == C.TRACK_TYPE_NONE
                || rendererTrackSelections[rendererIndex] != null);
    rendererConfigurations[rendererIndex] =
        rendererEnabled ? RendererConfiguration.DEFAULT : null;
  }

  // Audio and video renderers may be reconfigured for tunneled playback if appropriate.
  maybeConfigureRenderersForTunneling(
      mappedTrackInfo,
      rendererFormatSupports,
      rendererConfigurations,
      rendererTrackSelections,
      parameters.tunnelingAudioSessionId);

  return Pair.create(rendererConfigurations, rendererTrackSelections);
}
Example 13
Source File: HlsSampleStreamWrapper.java From Telegram-FOSS with GNU General Public License v2.0 | 4 votes |
/**
 * Builds the track groups exposed by this {@link HlsSampleStreamWrapper}, together with the
 * internal data structures needed to operate on them.
 *
 * <p>Tracks in HLS are complicated. An HLS master playlist defines a number of "variants", each
 * typically muxing video, audio and possibly additional audio, metadata and caption tracks. The
 * user should be able to select an adaptive track spanning all variants, each individual
 * variant, and — where a variant carries multiple audio tracks — between those too.
 *
 * <p>To that end, the {@link HlsChunkSource} exposes (N+1) tracks, where N is the number of
 * variants in the master playlist: one adaptive track spanning all variants (initially
 * selected) plus one track per variant. After the extractor has discovered the tracks inside
 * the variant streams, the two sets are combined here into the set this wrapper exposes:
 *
 * <ul>
 *   <li>A "primary" extractor track type is inferred: video if present, else audio if present,
 *       else text if present, else none.
 *   <li>If exactly one extractor track has the primary type, it is expanded into (N+1) exposed
 *       tracks, one per chunk source track. Selecting one of these switches the selected track
 *       on the chunk source.
 *   <li>Every other extractor track is exposed directly; selecting one leaves the chunk source
 *       selection unchanged.
 * </ul>
 */
private void buildTracksFromSampleStreams() {
  // Infer the primary track type and, when it is unique, the index of its single track.
  int primaryExtractorTrackType = C.TRACK_TYPE_NONE;
  int primaryExtractorTrackIndex = C.INDEX_UNSET;
  int extractorTrackCount = sampleQueues.length;
  for (int queueIndex = 0; queueIndex < extractorTrackCount; queueIndex++) {
    String sampleMimeType = sampleQueues[queueIndex].getUpstreamFormat().sampleMimeType;
    int trackType;
    if (MimeTypes.isVideo(sampleMimeType)) {
      trackType = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      trackType = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(sampleMimeType)) {
      trackType = C.TRACK_TYPE_TEXT;
    } else {
      trackType = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(trackType) > getTrackTypeScore(primaryExtractorTrackType)) {
      primaryExtractorTrackType = trackType;
      primaryExtractorTrackIndex = queueIndex;
    } else if (trackType == primaryExtractorTrackType
        && primaryExtractorTrackIndex != C.INDEX_UNSET) {
      // A second track of the primary type: the index is only kept when unique, so unset it.
      primaryExtractorTrackIndex = C.INDEX_UNSET;
    }
  }

  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;

  // Prepare internal bookkeeping. Initially each exposed group maps to the sample queue of the
  // same index.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[extractorTrackCount];
  for (int queueIndex = 0; queueIndex < extractorTrackCount; queueIndex++) {
    trackGroupToSampleQueueIndex[queueIndex] = queueIndex;
  }

  // Build the exposed track groups.
  TrackGroup[] trackGroups = new TrackGroup[extractorTrackCount];
  for (int queueIndex = 0; queueIndex < extractorTrackCount; queueIndex++) {
    Format sampleFormat = sampleQueues[queueIndex].getUpstreamFormat();
    if (queueIndex == primaryExtractorTrackIndex) {
      // Expand the unique primary track into one format per chunk source track.
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        formats[0] =
            sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      trackGroups[queueIndex] = new TrackGroup(formats);
      primaryTrackGroupIndex = queueIndex;
    } else {
      // Muxed audio alongside a primary video track inherits the declared muxed audio format.
      Format trackFormat =
          primaryExtractorTrackType == C.TRACK_TYPE_VIDEO
                  && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      trackGroups[queueIndex] = new TrackGroup(deriveFormat(trackFormat, sampleFormat, false));
    }
  }
  this.trackGroups = new TrackGroupArray(trackGroups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = TrackGroupArray.EMPTY;
}
Example 14
Source File: DefaultTrackSelector.java From Telegram with GNU General Public License v2.0 | 4 votes |
// Selects tracks for every renderer, then applies user-specified disabling/overrides from the
// current Parameters before creating the final selections and renderer configurations.
@Override
protected final Pair<@NullableType RendererConfiguration[], @NullableType TrackSelection[]>
    selectTracks(
        MappedTrackInfo mappedTrackInfo,
        int[][][] rendererFormatSupports,
        int[] rendererMixedMimeTypeAdaptationSupports)
        throws ExoPlaybackException {
  // Snapshot the parameters so the whole selection pass sees a consistent view.
  Parameters params = parametersReference.get();
  int rendererCount = mappedTrackInfo.getRendererCount();
  TrackSelection.@NullableType Definition[] definitions =
      selectAllTracks(
          mappedTrackInfo,
          rendererFormatSupports,
          rendererMixedMimeTypeAdaptationSupports,
          params);
  // Apply track disabling and overriding.
  for (int i = 0; i < rendererCount; i++) {
    if (params.getRendererDisabled(i)) {
      // Renderer explicitly disabled by the user: drop any automatic selection.
      definitions[i] = null;
      continue;
    }
    TrackGroupArray rendererTrackGroups = mappedTrackInfo.getTrackGroups(i);
    if (params.hasSelectionOverride(i, rendererTrackGroups)) {
      // A null override clears the selection; a non-null one replaces it entirely.
      SelectionOverride override = params.getSelectionOverride(i, rendererTrackGroups);
      definitions[i] =
          override == null
              ? null
              : new TrackSelection.Definition(
                  rendererTrackGroups.get(override.groupIndex),
                  override.tracks,
                  override.reason,
                  override.data);
    }
  }
  @NullableType
  TrackSelection[] rendererTrackSelections =
      trackSelectionFactory.createTrackSelections(definitions, getBandwidthMeter());
  // Initialize the renderer configurations to the default configuration for all renderers with
  // selections, and null otherwise. Renderers of TRACK_TYPE_NONE are enabled even without a
  // selection (they consume no track).
  @NullableType
  RendererConfiguration[] rendererConfigurations = new RendererConfiguration[rendererCount];
  for (int i = 0; i < rendererCount; i++) {
    boolean forceRendererDisabled = params.getRendererDisabled(i);
    boolean rendererEnabled =
        !forceRendererDisabled
            && (mappedTrackInfo.getRendererType(i) == C.TRACK_TYPE_NONE
                || rendererTrackSelections[i] != null);
    rendererConfigurations[i] = rendererEnabled ? RendererConfiguration.DEFAULT : null;
  }
  // Configure audio and video renderers to use tunneling if appropriate.
  maybeConfigureRenderersForTunneling(
      mappedTrackInfo,
      rendererFormatSupports,
      rendererConfigurations,
      rendererTrackSelections,
      params.tunnelingAudioSessionId);
  return Pair.create(rendererConfigurations, rendererTrackSelections);
}
Example 15
Source File: HlsSampleStreamWrapper.java From Telegram with GNU General Public License v2.0 | 4 votes |
/**
 * Builds tracks that are exposed by this {@link HlsSampleStreamWrapper} instance, as well as
 * internal data-structures required for operation.
 *
 * <p>Tracks in HLS are complicated. An HLS master playlist contains a number of "variants". Each
 * variant stream typically contains muxed video, audio and (possibly) additional audio, metadata
 * and caption tracks. We wish to allow the user to select between an adaptive track that spans
 * all variants, as well as each individual variant. If multiple audio tracks are present within
 * each variant then we wish to allow the user to select between those also.
 *
 * <p>To do this, tracks are constructed as follows. The {@link HlsChunkSource} exposes (N+1)
 * tracks, where N is the number of variants defined in the HLS master playlist. These consist of
 * one adaptive track defined to span all variants and a track for each individual variant. The
 * adaptive track is initially selected. The extractor is then prepared to discover the tracks
 * inside of each variant stream. The two sets of tracks are then combined by this method to
 * create a third set, which is the set exposed by this {@link HlsSampleStreamWrapper}:
 *
 * <ul>
 *   <li>The extractor tracks are inspected to infer a "primary" track type. If a video track is
 *       present then it is always the primary type. If not, audio is the primary type if present.
 *       Else text is the primary type if present. Else there is no primary type.
 *   <li>If there is exactly one extractor track of the primary type, it's expanded into (N+1)
 *       exposed tracks, all of which correspond to the primary extractor track and each of which
 *       corresponds to a different chunk source track. Selecting one of these tracks has the
 *       effect of switching the selected track on the chunk source.
 *   <li>All other extractor tracks are exposed directly. Selecting one of these tracks has the
 *       effect of selecting an extractor track, leaving the selected track on the chunk source
 *       unchanged.
 * </ul>
 */
private void buildTracksFromSampleStreams() {
  // Iterate through the extractor tracks to discover the "primary" track type, and the index
  // of the single track of this type. Priority is determined by getTrackTypeScore; the index is
  // only kept while exactly one track of the primary type exists.
  int primaryExtractorTrackType = C.TRACK_TYPE_NONE;
  int primaryExtractorTrackIndex = C.INDEX_UNSET;
  int extractorTrackCount = sampleQueues.length;
  for (int i = 0; i < extractorTrackCount; i++) {
    // Classify each sample queue by the MIME type of its upstream format.
    String sampleMimeType = sampleQueues[i].getUpstreamFormat().sampleMimeType;
    int trackType;
    if (MimeTypes.isVideo(sampleMimeType)) {
      trackType = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      trackType = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(sampleMimeType)) {
      trackType = C.TRACK_TYPE_TEXT;
    } else {
      trackType = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(trackType) > getTrackTypeScore(primaryExtractorTrackType)) {
      // Found a higher-scoring track type: it becomes the new primary candidate.
      primaryExtractorTrackType = trackType;
      primaryExtractorTrackIndex = i;
    } else if (trackType == primaryExtractorTrackType
        && primaryExtractorTrackIndex != C.INDEX_UNSET) {
      // We have multiple tracks of the primary type. We only want an index if there only exists
      // a single track of the primary type, so unset the index again.
      primaryExtractorTrackIndex = C.INDEX_UNSET;
    }
  }
  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;
  // Instantiate the necessary internal data-structures. Exposed track groups map one-to-one
  // onto sample queues.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    trackGroupToSampleQueueIndex[i] = i;
  }
  // Construct the set of exposed track groups.
  TrackGroup[] trackGroups = new TrackGroup[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    Format sampleFormat = sampleQueues[i].getUpstreamFormat();
    if (i == primaryExtractorTrackIndex) {
      // The single primary track is expanded into one exposed format per chunk source track.
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        formats[0] =
            sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      trackGroups[i] = new TrackGroup(formats);
      primaryTrackGroupIndex = i;
    } else {
      // Non-primary tracks are exposed directly. When the primary type is video, an audio track
      // derives its format from muxedAudioFormat; otherwise no track format is applied.
      Format trackFormat =
          primaryExtractorTrackType == C.TRACK_TYPE_VIDEO
                  && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      trackGroups[i] = new TrackGroup(deriveFormat(trackFormat, sampleFormat, false));
    }
  }
  this.trackGroups = new TrackGroupArray(trackGroups);
  // buildTracksFromSampleStreams must run at most once per preparation.
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = TrackGroupArray.EMPTY;
}