Java Code Examples for com.google.android.exoplayer2.C#MICROS_PER_SECOND
The following examples show how to use
com.google.android.exoplayer2.C#MICROS_PER_SECOND.
You can vote up the examples you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: DefaultAudioSink.java From Telegram with GNU General Public License v2.0 | 7 votes |
/**
 * Returns the default buffer size, in bytes, for the underlying audio track.
 *
 * <p>For PCM input the size starts from {@link AudioTrack#getMinBufferSize}, is scaled up for
 * headroom, then clamped to minimum/maximum sizes expressed as playback durations. For
 * passthrough (encoded) input the size is derived from the maximum encoded rate for the output
 * encoding.
 */
private int getDefaultBufferSize() {
  if (isInputPcm) {
    int minBufferSize =
        AudioTrack.getMinBufferSize(outputSampleRate, outputChannelConfig, outputEncoding);
    Assertions.checkState(minBufferSize != ERROR_BAD_VALUE);
    // Scale the platform minimum up for headroom.
    int multipliedBufferSize = minBufferSize * BUFFER_MULTIPLICATION_FACTOR;
    // Convert the min/max buffer durations to byte counts for clamping.
    int minAppBufferSize = (int) durationUsToFrames(MIN_BUFFER_DURATION_US) * outputPcmFrameSize;
    int maxAppBufferSize = (int) Math.max(
        minBufferSize, durationUsToFrames(MAX_BUFFER_DURATION_US) * outputPcmFrameSize);
    return Util.constrainValue(multipliedBufferSize, minAppBufferSize, maxAppBufferSize);
  } else {
    int rate = getMaximumEncodedRateBytesPerSecond(outputEncoding);
    if (outputEncoding == C.ENCODING_AC3) {
      // AC-3 gets additional headroom via its own multiplier.
      rate *= AC3_BUFFER_MULTIPLICATION_FACTOR;
    }
    // Size the buffer to hold PASSTHROUGH_BUFFER_DURATION_US of data at the maximum rate.
    return (int) (PASSTHROUGH_BUFFER_DURATION_US * rate / C.MICROS_PER_SECOND);
  }
}
Example 2
Source File: TrackSelectionUtil.java From Telegram-FOSS with GNU General Public License v2.0 | 6 votes |
/**
 * Returns the average bitrate, in bits per second, of the chunks produced by {@code iterator}.
 *
 * <p>Chunks are accumulated until {@code maxDurationUs} is reached or a chunk of unknown length
 * is encountered. A chunk straddling the duration limit contributes only the pro-rated portion
 * of its length that falls inside the limit.
 *
 * @param iterator Iterator for media chunk sequences.
 * @param maxDurationUs Maximum duration of chunks to include in the average, in microseconds.
 * @return Average bitrate in bits per second, or {@link Format#NO_VALUE} if there are no chunks
 *     or the first chunk length is unknown.
 */
public static int getAverageBitrate(MediaChunkIterator iterator, long maxDurationUs) {
  long elapsedUs = 0;
  long byteCount = 0;
  while (iterator.next()) {
    long lengthBytes = iterator.getDataSpec().length;
    if (lengthBytes == C.LENGTH_UNSET) {
      // Unknown chunk length: stop accumulating here.
      break;
    }
    long chunkUs = iterator.getChunkEndTimeUs() - iterator.getChunkStartTimeUs();
    if (elapsedUs + chunkUs >= maxDurationUs) {
      // This chunk crosses the limit; count only the in-window fraction of its bytes.
      byteCount += lengthBytes * (maxDurationUs - elapsedUs) / chunkUs;
      elapsedUs = maxDurationUs;
      break;
    }
    elapsedUs += chunkUs;
    byteCount += lengthBytes;
  }
  if (elapsedUs == 0) {
    return Format.NO_VALUE;
  }
  return (int) (byteCount * C.BITS_PER_BYTE * C.MICROS_PER_SECOND / elapsedUs);
}
Example 3
Source File: Ac3Reader.java From MediaSDK with Apache License 2.0 | 6 votes |
/** Parses the sample header currently held in {@code headerScratchBits}. */
@SuppressWarnings("ReferenceEquality")
private void parseHeader() {
  headerScratchBits.setPosition(0);
  SyncFrameInfo frameInfo = Ac3Util.parseAc3SyncframeInfo(headerScratchBits);
  // (Re)publish the output format if this is the first syncframe, or if the stream's channel
  // count, sample rate or MIME type changed. The mimeType comparison is deliberately reference
  // equality (see the @SuppressWarnings above).
  if (format == null || frameInfo.channelCount != format.channelCount
      || frameInfo.sampleRate != format.sampleRate
      || frameInfo.mimeType != format.sampleMimeType) {
    format = Format.createAudioSampleFormat(trackFormatId, frameInfo.mimeType, null,
        Format.NO_VALUE, Format.NO_VALUE, frameInfo.channelCount, frameInfo.sampleRate, null,
        null, 0, language);
    output.format(format);
  }
  sampleSize = frameInfo.frameSize;
  // In this class a sample is an access unit (syncframe in AC-3), but Format#sampleRate
  // specifies the number of PCM audio samples per second.
  sampleDurationUs = C.MICROS_PER_SECOND * frameInfo.sampleCount / format.sampleRate;
}
Example 4
Source File: DefaultAudioSink.java From MediaSDK with Apache License 2.0 | 6 votes |
/**
 * Returns the default audio buffer size in bytes.
 *
 * <p>PCM path: take the platform minimum buffer size, multiply it for headroom, and constrain
 * the result between bounds derived from {@code MIN_BUFFER_DURATION_US} and
 * {@code MAX_BUFFER_DURATION_US}. Passthrough path: size the buffer for
 * {@code PASSTHROUGH_BUFFER_DURATION_US} at the encoding's maximum byte rate.
 */
private int getDefaultBufferSize() {
  if (isInputPcm) {
    int minBufferSize =
        AudioTrack.getMinBufferSize(outputSampleRate, outputChannelConfig, outputEncoding);
    Assertions.checkState(minBufferSize != ERROR_BAD_VALUE);
    int multipliedBufferSize = minBufferSize * BUFFER_MULTIPLICATION_FACTOR;
    // Duration-based bounds converted to byte counts via the PCM frame size.
    int minAppBufferSize = (int) durationUsToFrames(MIN_BUFFER_DURATION_US) * outputPcmFrameSize;
    int maxAppBufferSize = (int) Math.max(
        minBufferSize, durationUsToFrames(MAX_BUFFER_DURATION_US) * outputPcmFrameSize);
    return Util.constrainValue(multipliedBufferSize, minAppBufferSize, maxAppBufferSize);
  } else {
    int rate = getMaximumEncodedRateBytesPerSecond(outputEncoding);
    if (outputEncoding == C.ENCODING_AC3) {
      // Extra headroom for AC-3 passthrough.
      rate *= AC3_BUFFER_MULTIPLICATION_FACTOR;
    }
    return (int) (PASSTHROUGH_BUFFER_DURATION_US * rate / C.MICROS_PER_SECOND);
  }
}
Example 5
Source File: Ac3Reader.java From TelePlus-Android with GNU General Public License v2.0 | 6 votes |
/** Parses the sample header from {@code headerScratchBits}. */
@SuppressWarnings("ReferenceEquality")
private void parseHeader() {
  headerScratchBits.setPosition(0);
  SyncFrameInfo frameInfo = Ac3Util.parseAc3SyncframeInfo(headerScratchBits);
  // Emit a (new) output format on first syncframe or whenever channel count, sample rate, or
  // MIME type differ from the current format. Reference equality on mimeType is intentional
  // (see @SuppressWarnings above).
  if (format == null || frameInfo.channelCount != format.channelCount
      || frameInfo.sampleRate != format.sampleRate
      || frameInfo.mimeType != format.sampleMimeType) {
    format = Format.createAudioSampleFormat(trackFormatId, frameInfo.mimeType, null,
        Format.NO_VALUE, Format.NO_VALUE, frameInfo.channelCount, frameInfo.sampleRate, null,
        null, 0, language);
    output.format(format);
  }
  sampleSize = frameInfo.frameSize;
  // In this class a sample is an access unit (syncframe in AC-3), but the MediaFormat sample rate
  // specifies the number of PCM audio samples per second.
  sampleDurationUs = C.MICROS_PER_SECOND * frameInfo.sampleCount / format.sampleRate;
}
Example 6
Source File: Ac3Reader.java From K-Sonic with MIT License | 6 votes |
/** Parses the sample header. */
private void parseHeader() {
  if (format == null) {
    // We read ahead to distinguish between AC-3 and E-AC-3.
    headerScratchBits.skipBits(40);
    isEac3 = headerScratchBits.readBits(5) == 16;
    // Rewind the 45 bits consumed above so the format parsers see the full header.
    headerScratchBits.setPosition(headerScratchBits.getPosition() - 45);
    format = isEac3
        ? Ac3Util.parseEac3SyncframeFormat(headerScratchBits, trackFormatId, language, null)
        : Ac3Util.parseAc3SyncframeFormat(headerScratchBits, trackFormatId, language, null);
    output.format(format);
  }
  sampleSize = isEac3
      ? Ac3Util.parseEAc3SyncframeSize(headerScratchBits.data)
      : Ac3Util.parseAc3SyncframeSize(headerScratchBits.data);
  int audioSamplesPerSyncframe = isEac3
      ? Ac3Util.parseEAc3SyncframeAudioSampleCount(headerScratchBits.data)
      : Ac3Util.getAc3SyncframeAudioSampleCount();
  // In this class a sample is an access unit (syncframe in AC-3), but the MediaFormat sample rate
  // specifies the number of PCM audio samples per second.
  sampleDurationUs = (int) (C.MICROS_PER_SECOND * audioSamplesPerSyncframe / format.sampleRate);
}
Example 7
Source File: SegmentBase.java From TelePlus-Android with GNU General Public License v2.0 | 5 votes |
/** @see DashSegmentIndex#getSegmentNum(long, long) */
public long getSegmentNum(long timeUs, long periodDurationUs) {
  final long firstSegmentNum = getFirstSegmentNum();
  final long segmentCount = getSegmentCount(periodDurationUs);
  if (segmentCount == 0) {
    return firstSegmentNum;
  }
  if (segmentTimeline == null) {
    // All segments are of equal duration (with the possible exception of the last one).
    long durationUs = (duration * C.MICROS_PER_SECOND) / timescale;
    long segmentNum = startNumber + timeUs / durationUs;
    // Ensure we stay within bounds.
    return segmentNum < firstSegmentNum ? firstSegmentNum
        : segmentCount == DashSegmentIndex.INDEX_UNBOUNDED ? segmentNum
        : Math.min(segmentNum, firstSegmentNum + segmentCount - 1);
  } else {
    // The index cannot be unbounded. Identify the segment using binary search.
    long lowIndex = firstSegmentNum;
    long highIndex = firstSegmentNum + segmentCount - 1;
    while (lowIndex <= highIndex) {
      long midIndex = lowIndex + (highIndex - lowIndex) / 2;
      long midTimeUs = getSegmentTimeUs(midIndex);
      if (midTimeUs < timeUs) {
        lowIndex = midIndex + 1;
      } else if (midTimeUs > timeUs) {
        highIndex = midIndex - 1;
      } else {
        return midIndex;
      }
    }
    // No exact match: return the segment that starts at or before timeUs (highIndex), unless the
    // search ran off the front of the range, in which case return the first segment.
    return lowIndex == firstSegmentNum ? lowIndex : highIndex;
  }
}
Example 8
Source File: MpegAudioReader.java From MediaSDK with Apache License 2.0 | 5 votes |
/**
 * Attempts to read the remaining two bytes of the frame header.
 *
 * <p>If a frame header is read in full then the state is changed to {@link #STATE_READING_FRAME},
 * the media format is output if this has not previously occurred, the four header bytes are
 * output as sample data, and the position of the source is advanced to the byte that immediately
 * follows the header.
 *
 * <p>If a frame header is read in full but cannot be parsed then the state is changed to
 * {@link #STATE_READING_HEADER}.
 *
 * <p>If a frame header is not read in full then the position of the source is advanced to the
 * limit, and the method should be called again with the next source to continue the read.
 *
 * @param source The source from which to read.
 */
private void readHeaderRemainder(ParsableByteArray source) {
  int bytesToRead = Math.min(source.bytesLeft(), HEADER_SIZE - frameBytesRead);
  source.readBytes(headerScratch.data, frameBytesRead, bytesToRead);
  frameBytesRead += bytesToRead;
  if (frameBytesRead < HEADER_SIZE) {
    // We haven't read the whole header yet.
    return;
  }
  headerScratch.setPosition(0);
  boolean parsedHeader = MpegAudioHeader.populateHeader(headerScratch.readInt(), header);
  if (!parsedHeader) {
    // We thought we'd located a frame header, but we hadn't.
    frameBytesRead = 0;
    state = STATE_READING_HEADER;
    return;
  }
  frameSize = header.frameSize;
  if (!hasOutputFormat) {
    // Frame duration in microseconds: samples-per-frame scaled by the sample rate.
    frameDurationUs = (C.MICROS_PER_SECOND * header.samplesPerFrame) / header.sampleRate;
    Format format = Format.createAudioSampleFormat(formatId, header.mimeType, null,
        Format.NO_VALUE, MpegAudioHeader.MAX_FRAME_SIZE_BYTES, header.channels,
        header.sampleRate, null, null, 0, language);
    output.format(format);
    hasOutputFormat = true;
  }
  // Emit the four header bytes as the start of the sample data.
  headerScratch.setPosition(0);
  output.sampleData(headerScratch, HEADER_SIZE);
  state = STATE_READING_FRAME;
}
Example 9
Source File: Mp3Extractor.java From Telegram-FOSS with GNU General Public License v2.0 | 5 votes |
/**
 * Reads (the remainder of) the current MPEG audio frame, emitting sample metadata once the frame
 * is complete.
 *
 * @param extractorInput The input to read from.
 * @return One of {@code RESULT_CONTINUE} or {@code RESULT_END_OF_INPUT}.
 */
private int readSample(ExtractorInput extractorInput) throws IOException, InterruptedException {
  if (sampleBytesRemaining == 0) {
    // Start of a new frame: peek and validate the 4-byte header.
    extractorInput.resetPeekPosition();
    if (peekEndOfStreamOrHeader(extractorInput)) {
      return RESULT_END_OF_INPUT;
    }
    scratch.setPosition(0);
    int sampleHeaderData = scratch.readInt();
    if (!headersMatch(sampleHeaderData, synchronizedHeaderData)
        || MpegAudioHeader.getFrameSize(sampleHeaderData) == C.LENGTH_UNSET) {
      // We have lost synchronization, so attempt to resynchronize starting at the next byte.
      extractorInput.skipFully(1);
      synchronizedHeaderData = 0;
      return RESULT_CONTINUE;
    }
    MpegAudioHeader.populateHeader(sampleHeaderData, synchronizedHeader);
    if (basisTimeUs == C.TIME_UNSET) {
      // First frame: establish the timestamp basis, honoring any forced first-sample timestamp.
      basisTimeUs = seeker.getTimeUs(extractorInput.getPosition());
      if (forcedFirstSampleTimestampUs != C.TIME_UNSET) {
        long embeddedFirstSampleTimestampUs = seeker.getTimeUs(0);
        basisTimeUs += forcedFirstSampleTimestampUs - embeddedFirstSampleTimestampUs;
      }
    }
    sampleBytesRemaining = synchronizedHeader.frameSize;
  }
  int bytesAppended = trackOutput.sampleData(extractorInput, sampleBytesRemaining, true);
  if (bytesAppended == C.RESULT_END_OF_INPUT) {
    return RESULT_END_OF_INPUT;
  }
  sampleBytesRemaining -= bytesAppended;
  if (sampleBytesRemaining > 0) {
    // Frame not yet complete; keep reading on the next call.
    return RESULT_CONTINUE;
  }
  // Frame complete: compute its timestamp from the running PCM sample count.
  long timeUs = basisTimeUs + (samplesRead * C.MICROS_PER_SECOND / synchronizedHeader.sampleRate);
  trackOutput.sampleMetadata(timeUs, C.BUFFER_FLAG_KEY_FRAME, synchronizedHeader.frameSize, 0,
      null);
  samplesRead += synchronizedHeader.samplesPerFrame;
  sampleBytesRemaining = 0;
  return RESULT_CONTINUE;
}
Example 10
Source File: SegmentBase.java From Telegram-FOSS with GNU General Public License v2.0 | 5 votes |
/** @see DashSegmentIndex#getDurationUs(long, long) */
public final long getSegmentDurationUs(long sequenceNumber, long periodDurationUs) {
  if (segmentTimeline != null) {
    // Explicit timeline: look up this entry's duration and convert it to microseconds.
    long timelineDurationUs = segmentTimeline.get((int) (sequenceNumber - startNumber)).duration;
    return (timelineDurationUs * C.MICROS_PER_SECOND) / timescale;
  }
  int segmentCount = getSegmentCount(periodDurationUs);
  boolean isFinalSegment = segmentCount != DashSegmentIndex.INDEX_UNBOUNDED
      && sequenceNumber == (getFirstSegmentNum() + segmentCount - 1);
  if (isFinalSegment) {
    // The last segment extends to the end of the period.
    return periodDurationUs - getSegmentTimeUs(sequenceNumber);
  }
  // All non-final segments share the template duration.
  return (duration * C.MICROS_PER_SECOND) / timescale;
}
Example 11
Source File: SegmentBase.java From K-Sonic with MIT License | 5 votes |
/**
 * @see DashSegmentIndex#getDurationUs(int, long)
 */
public final long getSegmentDurationUs(int sequenceNumber, long periodDurationUs) {
  if (segmentTimeline != null) {
    // A timeline is present: read this segment's own duration from it.
    long entryDuration = segmentTimeline.get(sequenceNumber - startNumber).duration;
    return (entryDuration * C.MICROS_PER_SECOND) / timescale;
  }
  int segmentCount = getSegmentCount(periodDurationUs);
  if (segmentCount != DashSegmentIndex.INDEX_UNBOUNDED
      && sequenceNumber == (getFirstSegmentNum() + segmentCount - 1)) {
    // Final segment: it lasts until the period ends.
    return periodDurationUs - getSegmentTimeUs(sequenceNumber);
  }
  // Otherwise every segment has the template duration.
  return (duration * C.MICROS_PER_SECOND) / timescale;
}
Example 12
Source File: Mp3Extractor.java From Telegram with GNU General Public License v2.0 | 5 votes |
/**
 * Reads the current MPEG audio frame and, once fully consumed, outputs its sample metadata.
 *
 * @param extractorInput The input to read from.
 * @return {@code RESULT_CONTINUE} or {@code RESULT_END_OF_INPUT}.
 */
private int readSample(ExtractorInput extractorInput) throws IOException, InterruptedException {
  if (sampleBytesRemaining == 0) {
    // Beginning a new frame: peek the header and check it against the synchronized stream.
    extractorInput.resetPeekPosition();
    if (peekEndOfStreamOrHeader(extractorInput)) {
      return RESULT_END_OF_INPUT;
    }
    scratch.setPosition(0);
    int sampleHeaderData = scratch.readInt();
    if (!headersMatch(sampleHeaderData, synchronizedHeaderData)
        || MpegAudioHeader.getFrameSize(sampleHeaderData) == C.LENGTH_UNSET) {
      // We have lost synchronization, so attempt to resynchronize starting at the next byte.
      extractorInput.skipFully(1);
      synchronizedHeaderData = 0;
      return RESULT_CONTINUE;
    }
    MpegAudioHeader.populateHeader(sampleHeaderData, synchronizedHeader);
    if (basisTimeUs == C.TIME_UNSET) {
      // Establish the timestamp basis on the first frame, applying any forced offset.
      basisTimeUs = seeker.getTimeUs(extractorInput.getPosition());
      if (forcedFirstSampleTimestampUs != C.TIME_UNSET) {
        long embeddedFirstSampleTimestampUs = seeker.getTimeUs(0);
        basisTimeUs += forcedFirstSampleTimestampUs - embeddedFirstSampleTimestampUs;
      }
    }
    sampleBytesRemaining = synchronizedHeader.frameSize;
  }
  int bytesAppended = trackOutput.sampleData(extractorInput, sampleBytesRemaining, true);
  if (bytesAppended == C.RESULT_END_OF_INPUT) {
    return RESULT_END_OF_INPUT;
  }
  sampleBytesRemaining -= bytesAppended;
  if (sampleBytesRemaining > 0) {
    // More of this frame remains; continue on the next invocation.
    return RESULT_CONTINUE;
  }
  // Frame complete: derive its timestamp from the cumulative PCM sample count.
  long timeUs = basisTimeUs + (samplesRead * C.MICROS_PER_SECOND / synchronizedHeader.sampleRate);
  trackOutput.sampleMetadata(timeUs, C.BUFFER_FLAG_KEY_FRAME, synchronizedHeader.frameSize, 0,
      null);
  samplesRead += synchronizedHeader.samplesPerFrame;
  sampleBytesRemaining = 0;
  return RESULT_CONTINUE;
}
Example 13
Source File: SsaDecoder.java From Telegram with GNU General Public License v2.0 | 5 votes |
/** * Parses an SSA timecode string. * * @param timeString The string to parse. * @return The parsed timestamp in microseconds. */ public static long parseTimecodeUs(String timeString) { Matcher matcher = SSA_TIMECODE_PATTERN.matcher(timeString); if (!matcher.matches()) { return C.TIME_UNSET; } long timestampUs = Long.parseLong(matcher.group(1)) * 60 * 60 * C.MICROS_PER_SECOND; timestampUs += Long.parseLong(matcher.group(2)) * 60 * C.MICROS_PER_SECOND; timestampUs += Long.parseLong(matcher.group(3)) * C.MICROS_PER_SECOND; timestampUs += Long.parseLong(matcher.group(4)) * 10000; // 100ths of a second. return timestampUs; }
Example 14
Source File: AdtsReader.java From TelePlus-Android with GNU General Public License v2.0 | 4 votes |
/** Parses the sample header. */
private void parseAdtsHeader() throws ParserException {
  adtsScratch.setPosition(0);
  if (!hasOutputFormat) {
    int audioObjectType = adtsScratch.readBits(2) + 1;
    if (audioObjectType != 2) {
      // The stream indicates AAC-Main (1), AAC-SSR (3) or AAC-LTP (4). When the stream indicates
      // AAC-Main it's more likely that the stream contains HE-AAC (5), which cannot be
      // represented correctly in the 2 bit audio_object_type field in the ADTS header. In
      // practice when the stream indicates AAC-SSR or AAC-LTP it more commonly contains AAC-LC or
      // HE-AAC. Since most Android devices don't support AAC-Main, AAC-SSR or AAC-LTP, and since
      // indicating AAC-LC works for HE-AAC streams, we pretend that we're dealing with AAC-LC and
      // hope for the best. In practice this often works.
      // See: https://github.com/google/ExoPlayer/issues/774
      // See: https://github.com/google/ExoPlayer/issues/1383
      Log.w(TAG, "Detected audio object type: " + audioObjectType + ", but assuming AAC LC.");
      audioObjectType = 2;
    }
    int sampleRateIndex = adtsScratch.readBits(4);
    adtsScratch.skipBits(1);
    int channelConfig = adtsScratch.readBits(3);
    // Build an AudioSpecificConfig so the decoder gets codec-specific data, then parse it back
    // to obtain the (sampleRate, channelCount) pair for the Format.
    byte[] audioSpecificConfig = CodecSpecificDataUtil.buildAacAudioSpecificConfig(
        audioObjectType, sampleRateIndex, channelConfig);
    Pair<Integer, Integer> audioParams = CodecSpecificDataUtil.parseAacAudioSpecificConfig(
        audioSpecificConfig);
    Format format = Format.createAudioSampleFormat(formatId, MimeTypes.AUDIO_AAC, null,
        Format.NO_VALUE, Format.NO_VALUE, audioParams.second, audioParams.first,
        Collections.singletonList(audioSpecificConfig), null, 0, language);
    // In this class a sample is an access unit, but the MediaFormat sample rate specifies the
    // number of PCM audio samples per second. Each AAC access unit holds 1024 PCM samples.
    sampleDurationUs = (C.MICROS_PER_SECOND * 1024) / format.sampleRate;
    output.format(format);
    hasOutputFormat = true;
  } else {
    // Format already output: skip the fields we only need the first time around.
    adtsScratch.skipBits(10);
  }
  adtsScratch.skipBits(4);
  int sampleSize = adtsScratch.readBits(13) - 2 /* the sync word */ - HEADER_SIZE;
  if (hasCrc) {
    sampleSize -= CRC_SIZE;
  }
  setReadingSampleState(output, sampleDurationUs, 0, sampleSize);
}
Example 15
Source File: AdtsReader.java From TelePlus-Android with GNU General Public License v2.0 | 4 votes |
/** Parses the sample header. */
private void parseAdtsHeader() throws ParserException {
  adtsScratch.setPosition(0);
  if (!hasOutputFormat) {
    int audioObjectType = adtsScratch.readBits(2) + 1;
    if (audioObjectType != 2) {
      // The stream indicates AAC-Main (1), AAC-SSR (3) or AAC-LTP (4). When the stream indicates
      // AAC-Main it's more likely that the stream contains HE-AAC (5), which cannot be
      // represented correctly in the 2 bit audio_object_type field in the ADTS header. In
      // practice when the stream indicates AAC-SSR or AAC-LTP it more commonly contains AAC-LC or
      // HE-AAC. Since most Android devices don't support AAC-Main, AAC-SSR or AAC-LTP, and since
      // indicating AAC-LC works for HE-AAC streams, we pretend that we're dealing with AAC-LC and
      // hope for the best. In practice this often works.
      // See: https://github.com/google/ExoPlayer/issues/774
      // See: https://github.com/google/ExoPlayer/issues/1383
      Log.w(TAG, "Detected audio object type: " + audioObjectType + ", but assuming AAC LC.");
      audioObjectType = 2;
    }
    int sampleRateIndex = adtsScratch.readBits(4);
    adtsScratch.skipBits(1);
    int channelConfig = adtsScratch.readBits(3);
    // Reconstruct the AudioSpecificConfig and parse out (sampleRate, channelCount) for the
    // Format we publish downstream.
    byte[] audioSpecificConfig = CodecSpecificDataUtil.buildAacAudioSpecificConfig(
        audioObjectType, sampleRateIndex, channelConfig);
    Pair<Integer, Integer> audioParams = CodecSpecificDataUtil.parseAacAudioSpecificConfig(
        audioSpecificConfig);
    Format format = Format.createAudioSampleFormat(formatId, MimeTypes.AUDIO_AAC, null,
        Format.NO_VALUE, Format.NO_VALUE, audioParams.second, audioParams.first,
        Collections.singletonList(audioSpecificConfig), null, 0, language);
    // In this class a sample is an access unit, but the MediaFormat sample rate specifies the
    // number of PCM audio samples per second. An AAC access unit carries 1024 PCM samples.
    sampleDurationUs = (C.MICROS_PER_SECOND * 1024) / format.sampleRate;
    output.format(format);
    hasOutputFormat = true;
  } else {
    // The format has already been output; skip the corresponding header fields.
    adtsScratch.skipBits(10);
  }
  adtsScratch.skipBits(4);
  int sampleSize = adtsScratch.readBits(13) - 2 /* the sync word */ - HEADER_SIZE;
  if (hasCrc) {
    sampleSize -= CRC_SIZE;
  }
  setReadingSampleState(output, sampleDurationUs, 0, sampleSize);
}
Example 16
Source File: AudioTrack.java From K-Sonic with MIT License | 4 votes |
/**
 * Returns {@link #getPlaybackHeadPosition()} expressed as microseconds.
 */
public long getPlaybackHeadPositionUs() {
  long headPositionFrames = getPlaybackHeadPosition();
  // Scale frames to microseconds using the track sample rate.
  return (headPositionFrames * C.MICROS_PER_SECOND) / sampleRate;
}
Example 17
Source File: StreamReader.java From TelePlus-Android with GNU General Public License v2.0 | 2 votes |
/**
 * Converts a granule value to a timestamp.
 *
 * <p>The granule is scaled by {@code C.MICROS_PER_SECOND} before dividing by {@code sampleRate},
 * so the result is in microseconds (the original javadoc incorrectly said milliseconds).
 *
 * @param granule The granule value.
 * @return Time in microseconds.
 */
protected long convertGranuleToTime(long granule) {
  return (granule * C.MICROS_PER_SECOND) / sampleRate;
}
Example 18
Source File: WavHeader.java From MediaSDK with Apache License 2.0 | 2 votes |
/**
 * Returns the time in microseconds for the given position in bytes.
 *
 * @param position The position in bytes.
 * @return The corresponding time in microseconds.
 */
public long getTimeUs(long position) {
  // Positions before the data chunk map to time zero.
  long offsetBytes = Math.max(0, position - dataStartPosition);
  return (offsetBytes * C.MICROS_PER_SECOND) / averageBytesPerSecond;
}
Example 19
Source File: TimestampAdjuster.java From MediaSDK with Apache License 2.0 | 2 votes |
/**
 * Converts a 90 kHz clock timestamp to a timestamp in microseconds.
 *
 * @param pts A 90 kHz clock timestamp.
 * @return The corresponding value in microseconds.
 */
public static long ptsToUs(long pts) {
  // Multiply before dividing so integer truncation only happens once.
  long scaledPts = pts * C.MICROS_PER_SECOND;
  return scaledPts / 90000;
}
Example 20
Source File: TimestampAdjuster.java From Telegram with GNU General Public License v2.0 | 2 votes |
/**
 * Converts a 90 kHz clock timestamp to a timestamp in microseconds.
 *
 * @param pts A 90 kHz clock timestamp.
 * @return The corresponding value in microseconds.
 */
public static long ptsToUs(long pts) {
  // us = pts / 90000 seconds worth of ticks, scaled to microseconds before the division.
  return pts * C.MICROS_PER_SECOND / 90000;
}