org.bytedeco.javacpp.avutil.AVRational Java Examples

The following examples show how to use org.bytedeco.javacpp.avutil.AVRational. Each example links back to its original project and source file, and related API usage can be found via the sidebar.
Example #1
Source File: testRecorder.java — from the easyCV project (Apache License 2.0)
/**
 * Reads the audio/video stream parameters (codec parameters, stream handles,
 * stream indices and the video frame rate) from the given input format context
 * and stores them in the corresponding instance fields.
 *
 * @param avfc the opened input {@link AVFormatContext} to inspect
 */
private void getInputParam(AVFormatContext avfc) {
	// get input video and audio stream indices from ifmt_ctx
	for (int idx = 0; idx < avfc.nb_streams(); idx++) {
		System.err.println("读取流参数:"+idx);
		AVStream stream = avfc.streams(idx);
		AVCodecParameters codecpar = stream.codecpar();
		if (codecpar.codec_type() == AVMEDIA_TYPE_VIDEO) {
			AVRational frame_rate = stream.r_frame_rate();
			// r_frame_rate is a rational, not a timestamp: validate it as a
			// rational. The previous check compared the int numerator against
			// AV_NOPTS_VALUE (a 64-bit timestamp sentinel), which is always
			// true and therefore never rejected an unset rate.
			if (frame_rate.num() > 0 && frame_rate.den() > 0) {
				// NOTE(review): integer division truncates fractional rates
				// (e.g. 30000/1001 -> 29) — confirm whether frameRate should
				// be computed in floating point.
				this.frameRate = (frame_rate.num()) / (frame_rate.den());
			}
			this.videoindex=idx;
			this.in_videoCodecpar = codecpar;
			this.in_videoStream = stream;
		} else if (codecpar.codec_type() == AVMEDIA_TYPE_AUDIO) {
			this.in_audioCodecpar = codecpar;
			this.in_audioStream = stream;
			this.audioindex=idx;
		}
	}
}
 
Example #2
Source File: FFMpegVideoDecoder.java — from the cineast project (MIT License)
/**
 * Initializes the audio decoding part of FFMPEG.
 *
 * @param config The {@link DecoderConfig} used for configuring the {@link FFMpegVideoDecoder}.
 * @return True if a) audio decoder was initialized, b) number of channels is smaller than or equal to zero (no audio) or c) audio is unavailable or unsupported, false if initialization failed due to technical reasons.
 */
private boolean initAudio(DecoderConfig config) {
    /* Read decoder configuration. */
    int samplerate = config.namedAsInt(CONFIG_SAMPLERATE_PROPERTY, CONFIG_SAMPLERATE_DEFAULT);
    int channels = config.namedAsInt(CONFIG_CHANNELS_PROPERTY, CONFIG_CHANNELS_DEFAULT);
    long channellayout = av_get_default_channel_layout(channels);

    /* If the configured number of channels is smaller than or equal to zero, continue without audio. */
    if (channels <= 0) {
        LOGGER.info("Channel setting is smaller than zero. Continuing without audio!");
        this.audioComplete.set(true);
        return true;
    }

    /* Find the best audio stream. */
    final AVCodec codec = av_codec_next((AVCodec)null);
    this.audioStream = av_find_best_stream(this.pFormatCtx, AVMEDIA_TYPE_AUDIO,-1, -1, codec, 0);
    if (this.audioStream < 0) {
        LOGGER.warn("Couldn't find a supported audio stream. Continuing without audio!");
        this.audioComplete.set(true);
        return true;
    }

    /* Allocate a new codec-context and populate it from the stream's codec parameters. */
    this.pCodecCtxAudio = avcodec_alloc_context3(codec);
    avcodec_parameters_to_context(this.pCodecCtxAudio, this.pFormatCtx.streams(this.audioStream).codecpar());

    /* Open the codec context. */
    if (avcodec_open2(this.pCodecCtxAudio, codec, (AVDictionary)null) < 0) {
        LOGGER.error("Could not open audio codec. Continuing without audio!");
        this.audioComplete.set(true);
        return true;
    }

    /* Allocate the re-sample context (source layout/format/rate -> channellayout/TARGET_FORMAT/samplerate). */
    this.swr_ctx = swr_alloc_set_opts(null, channellayout, TARGET_FORMAT, samplerate, this.pCodecCtxAudio.channel_layout(), this.pCodecCtxAudio.sample_fmt(), this.pCodecCtxAudio.sample_rate(), 0, null);
    if(swr_init(this.swr_ctx) < 0) {
        /* NOTE(review): the failed SwrContext is dropped without swr_free() —
         * confirm whether it should be released here to avoid a native leak. */
        this.swr_ctx = null;
        LOGGER.warn("Could not open re-sample context - original format will be kept!");
    }

    /* Allocate and configure the frame that receives re-sampled data. The previous
     * version called av_frame_alloc() twice in a row for this field, leaking the
     * first AVFrame and bypassing its null-check; a single allocation followed by
     * configuration is sufficient. */
    this.resampledFrame = av_frame_alloc();
    if (this.resampledFrame == null) {
        LOGGER.error("Could not allocate frame data structure for re-sampled data.");
        return false;
    }
    this.resampledFrame.channel_layout(channellayout);
    this.resampledFrame.sample_rate(samplerate);
    this.resampledFrame.channels(channels);
    this.resampledFrame.format(TARGET_FORMAT);

    /* Initialize the AudioDescriptor; duration is converted from stream time-base units to milliseconds. */
    final AVRational timebase = this.pFormatCtx.streams(this.audioStream).time_base();
    final long duration = (1000L * timebase.num() * this.pFormatCtx.streams(this.audioStream).duration()/timebase.den());
    if (this.swr_ctx == null) {
        /* Re-sampling unavailable: describe the audio using the codec's native rate/channels. */
        this.audioDescriptor = new AudioDescriptor(this.pCodecCtxAudio.sample_rate(), this.pCodecCtxAudio.channels(), duration);
    } else {
        this.audioDescriptor = new AudioDescriptor(this.resampledFrame.sample_rate(), this.resampledFrame.channels(), duration);
    }

    /* Completed initialization. */
    return true;
}
 
Example #3
Source File: FFMpegAudioDecoder.java — from the cineast project (MIT License)
/**
 * Returns the timestamp in milliseconds of the currently active frame. That timestamp is based on
 * a best-effort calculation by the FFMPEG decoder.
 *
 * @return Timestamp of the current frame.
 */
private Long getFrameTimestamp() {
    final AVRational base = this.pFormatCtx.streams(this.audioStream).time_base();
    final long bestEffortPts = this.decodedFrame.best_effort_timestamp();
    /* Scale from stream time-base units to milliseconds using floor division. */
    return Math.floorDiv(1000L * base.num() * bestEffortPts, (long) base.den());
}
 
Example #4
Source File: FFMpegVideoDecoder.java — from the cineast project (MIT License)
/**
 * Returns the timestamp in milliseconds of the currently active frame. That timestamp is based on
 * a best-effort calculation by the FFMPEG decoder.
 *
 * @param stream Number of the stream. Determines the time base used to calculate the timestamp;
 * @return Timestamp of the current frame.
 */
private Long getFrameTimestamp(int stream) {
    final AVRational base = this.pFormatCtx.streams(stream).time_base();
    final long bestEffortPts = this.pFrame.best_effort_timestamp();
    /* Scale from stream time-base units to milliseconds using floor division. */
    return Math.floorDiv(1000L * base.num() * bestEffortPts, (long) base.den());
}