FFmpeg Study: Analysis of the av_read_frame() Function

Preface

This article analyzes the av_read_frame() function, which lives in the libavformat library.

/**
 * Return the next frame of a stream.
 * This function returns what is stored in the file, and does not validate
 * that what is there are valid frames for the decoder. It will split what is
 * stored in the file into frames and return one for each call. It will not
 * omit invalid data between valid frames so as to give the decoder the maximum
 * information possible for decoding.
 *
 * If pkt->buf is NULL, then the packet is valid until the next
 * av_read_frame() or until avformat_close_input(). Otherwise the packet
 * is valid indefinitely. In both cases the packet must be freed with
 * av_packet_unref when it is no longer needed. For video, the packet contains
 * exactly one frame. For audio, it contains an integer number of frames if each
 * frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames
 * have a variable size (e.g. MPEG audio), then it contains one frame.
 *
 * pkt->pts, pkt->dts and pkt->duration are always set to correct
 * values in AVStream.time_base units (and guessed if the format cannot
 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
 * has B-frames, so it is better to rely on pkt->dts if you do not
 * decompress the payload.
 *
 * @return 0 if OK, < 0 on error or end of file
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt);
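
Before looking at the internals, here is a minimal demuxing loop built around av_read_frame(). This is only a sketch: the URL "input.mp4" and the function name demux_example are placeholders, and error handling is abridged.

// Minimal sketch of a demuxing loop around av_read_frame().
// "input.mp4" and demux_example are placeholders; error handling is abridged.
static int demux_example(void)
{
    AVFormatContext *fmt_ctx = NULL;
    AVPacket pkt;

    if (avformat_open_input(&fmt_ctx, "input.mp4", NULL, NULL) < 0)
        return -1;
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        avformat_close_input(&fmt_ctx);
        return -1;
    }

    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        // pkt.stream_index identifies which stream the packet belongs to;
        // hand it to the matching decoder here.
        av_packet_unref(&pkt);  // release the packet after use, as the doc comment above requires
    }

    avformat_close_input(&fmt_ctx);
    return 0;
}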

The call flow is as follows:
av_read_frame
--> read_frame_internal
----> ff_read_packet
------> s->iformat->read_packet
----> av_parser_init
----> parse_packet
------> av_parser_parse2

av_read_frame

The entry function: it reads one AVPacket from the stream, i.e. one audio or video packet demuxed from the corresponding container format.
If packet_buffer is non-empty, the packet is taken from the packet_buffer linked list; otherwise a new one is read from the input (a simplified sketch of read_from_packet_buffer follows the listing below).

// utils.c
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    int eof = 0;
    int ret;
    AVStream *st;

    if (!genpts) {
        // Usually takes this path: avformat_find_stream_info() buffers packets into packet_buffer, so here they are fetched from packet_buffer first
        ret = s->internal->packet_buffer
              ? read_from_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end, pkt)
              : read_frame_internal(s, pkt);

        if (ret < 0)
            return ret;
        goto return_packet;
    }

    // ... (handling for the AVFMT_FLAG_GENPTS case omitted)
return_packet:
    return ret;
}
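
read_from_packet_buffer simply pops the head node of the packet list and hands its packet to the caller. A simplified sketch of its behaviour (abridged; the exact code differs slightly across FFmpeg versions):

static int read_from_packet_buffer(AVPacketList **pkt_buffer,
                                   AVPacketList **pkt_buffer_end,
                                   AVPacket *pkt)
{
    AVPacketList *pktl = *pkt_buffer;

    *pkt        = pktl->pkt;        // hand ownership of the packet to the caller
    *pkt_buffer = pktl->next;       // advance the head pointer
    if (!pktl->next)
        *pkt_buffer_end = NULL;     // the list is now empty, clear the tail pointer
    av_freep(&pktl);                // free the list node itself
    return 0;
}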

read_frame_internal

  1. Call ff_read_packet to read one packet.
  2. Once a packet is read, initialize the parser if necessary, run parse_packet on it, and return the resulting packet.
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    int ret = 0, i, got_packet = 0;
    AVDictionary *metadata = NULL;
    
    // Clear/initialize the packet
    av_init_packet(pkt);

    while (!got_packet && !s->internal->parse_queue) {
        AVStream *st;
        AVPacket cur_pkt;

        // Read one packet through the demuxer: s->iformat->read_packet(s, pkt)
        ret = ff_read_packet(s, &cur_pkt);

        if (ret < 0) {
            if (ret == AVERROR(EAGAIN))
                return ret;
            /* flush the parsers */
            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->parser && st->need_parsing)
                    parse_packet(s, NULL, st->index);
            }
            /* all remaining packets are now in parse_queue =>
             * really terminate parsing */
            break;
        }

        ret = 0;
        st  = s->streams[cur_pkt.stream_index];

        // Initialize the stream's parser (st->parser); for H.264 this is ff_h264_parser
        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            // Look up the parser by codec id
            st->parser = av_parser_init(st->codecpar->codec_id);

            if (!st->parser) {
                st->need_parsing = AVSTREAM_PARSE_NONE;
            } else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
                st->parser->flags |= PARSER_FLAG_ONCE;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
                st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
        }


        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            *pkt = cur_pkt;
            got_packet = 1;
        } else if (st->discard < AVDISCARD_ALL) {
            // Parse the demuxed packet according to its bitstream format
            if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
                return ret;
            st->codecpar->sample_rate = st->internal->avctx->sample_rate;
            st->codecpar->bit_rate = st->internal->avctx->bit_rate;
            st->codecpar->channels = st->internal->avctx->channels;
            st->codecpar->channel_layout = st->internal->avctx->channel_layout;
            st->codecpar->codec_id = st->internal->avctx->codec_id;
        } else {
            // Discard the packet
            av_packet_unref(&cur_pkt);
        }
      
        // If this is a key frame, stop skipping to the next key frame
        if (pkt->flags & AV_PKT_FLAG_KEY)
            st->skip_to_keyframe = 0;

        if (st->skip_to_keyframe) {
            av_packet_unref(&cur_pkt);
            if (got_packet) {
                *pkt = cur_pkt;
            }
            got_packet = 0;
        }
    }

    // Fetch a packet from parse_queue
    if (!got_packet && s->internal->parse_queue)
        ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);

    return ret;
}


ff_read_packet

The code shown is the path without request_probe: when request_probe is not set (it stays 0), raw_packet_buffer also stays empty, and a packet is returned as soon as it is read.

For MP4, request_probe is set, so data has to be read into raw_packet_buffer until probe_codec succeeds.
For FLV, request_probe stays 0 and packets are read through flvdec.c#read_packet(s, pkt). When the packet read is the MetaData tag, the first video packet (sps/pps) or the first audio packet (AAC sequence header), FFERROR_REDO is returned, and a packet is only returned once the second video or audio packet has been read.
While handling the first video packet (sps/pps) and the first audio packet (AAC sequence header), the corresponding codec_id is set according to the protocol.

int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i, err;
    AVStream *st;

    for (;;) {
        AVPacketList *pktl = s->internal->raw_packet_buffer;
        
        if (pktl) {
            *pkt = pktl->pkt;
            st   = s->streams[pkt->stream_index];
            if (s->internal->raw_packet_buffer_remaining_size <= 0)
                if ((err = probe_codec(s, st, NULL)) < 0)
                    return err;
            if (st->request_probe <= 0) {
                s->internal->raw_packet_buffer                 = pktl->next;
                s->internal->raw_packet_buffer_remaining_size += pkt->size;
                av_free(pktl);
                return 0;
            }
        }

        pkt->data = NULL;
        pkt->size = 0;
        av_init_packet(pkt);

        // Read a packet through the demuxer's read_packet
        ret = s->iformat->read_packet(s, pkt);

        if (ret < 0) {
            /* Some demuxers return FFERROR_REDO when they consume
               data and discard it (ignored streams, junk, extradata).
               We must re-call the demuxer to get the real packet. */
            // From FLV logs: the MetaData tag, the first video packet and the first audio packet all return REDO; reading continues until the second audio or video packet
            if (ret == FFERROR_REDO)
                continue;
            if (!pktl || ret == AVERROR(EAGAIN))
                return ret;
            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->probe_packets || st->request_probe > 0)
                    if ((err = probe_codec(s, st, NULL)) < 0)
                        return err;
            }


            continue;
        }

        if (!pktl && st->request_probe <= 0)
            return ret;


        err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt, &s->internal->raw_packet_buffer_end, 0);
        if (err)
            return err;

        s->internal->raw_packet_buffer_remaining_size -= pkt->size;

        if ((err = probe_codec(s, st, pkt)) < 0)
            return err;
    }

}
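
add_to_pktbuf, called above with ref = 0 and in parse_packet below with ref = 1, appends a node to the tail of the given AVPacketList; the ref argument decides whether the packet is referenced into the node or ownership is simply moved. A simplified sketch (abridged; details differ across FFmpeg versions):

static int add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
                         AVPacketList **plast_pktl, int ref)
{
    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
    int ret;

    if (!pktl)
        return AVERROR(ENOMEM);

    if (ref) {
        // keep the caller's packet valid: take a new reference for the list node
        if ((ret = av_packet_ref(&pktl->pkt, pkt)) < 0) {
            av_free(pktl);
            return ret;
        }
    } else {
        pktl->pkt = *pkt;            // move ownership into the list node
    }

    if (*packet_buffer)
        (*plast_pktl)->next = pktl;  // append after the current tail
    else
        *packet_buffer = pktl;       // list was empty, this node becomes the head

    *plast_pktl = pktl;              // update the tail pointer
    return 0;
}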

s->iformat->read_packet
Calls the read_packet of the corresponding demuxer, which reads one packet from the input according to that container format's protocol.
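
For example, the FLV demuxer wires its callbacks up roughly as follows (an abridged sketch, not a verbatim copy of flvdec.c; the exact field list varies between FFmpeg versions):

// Abridged sketch of how a demuxer exposes read_packet; ff_read_packet()
// reaches flv_read_packet through s->iformat->read_packet.
AVInputFormat ff_flv_demuxer = {
    .name           = "flv",
    .long_name      = NULL_IF_CONFIG_SMALL("FLV (Flash Video)"),
    .priv_data_size = sizeof(FLVContext),
    .read_probe     = flv_probe,
    .read_header    = flv_read_header,
    .read_packet    = flv_read_packet,
    .read_seek      = flv_read_seek,
    .read_close     = flv_read_close,
    .extensions     = "flv",
};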

av_parser_init
Walks the parser list to find the parser matching the codec_id.
For the H.264 codec, for example, this is h264_parser.c#ff_h264_parser.

AVCodecParser ff_h264_parser = {
    .codec_ids      = { AV_CODEC_ID_H264 },
    .priv_data_size = sizeof(H264ParseContext),
    .parser_init    = init,
    .parser_parse   = h264_parse,
    .parser_close   = h264_close,
    .split          = h264_split,
};
AVCodecParserContext *av_parser_init(int codec_id)
{
    AVCodecParserContext *s = NULL;
    AVCodecParser *parser;
    int ret;

    if (codec_id == AV_CODEC_ID_NONE)
        return NULL;

    // Walk the parser list
    for (parser = av_first_parser; parser; parser = parser->next) {
        if (parser->codec_ids[0] == codec_id ||
            parser->codec_ids[1] == codec_id ||
            parser->codec_ids[2] == codec_id ||
            parser->codec_ids[3] == codec_id ||
            parser->codec_ids[4] == codec_id)
            goto found;
    }
    return NULL;

found:
    // Allocate and initialize the AVCodecParserContext
    s = av_mallocz(sizeof(AVCodecParserContext));
    s->parser = parser;
    s->priv_data = av_mallocz(parser->priv_data_size);
    s->fetch_timestamp=1;
    s->pict_type = AV_PICTURE_TYPE_I;
    if (parser->parser_init) {
        ret = parser->parser_init(s);
    }
    s->key_frame            = -1;
    s->dts_sync_point       = INT_MIN;
    s->dts_ref_dts_delta    = INT_MIN;
    s->pts_dts_delta        = INT_MIN;
    s->format               = -1;

    return s;
}

parse_packet

Calls av_parser_parse2 to parse an audio or video packet; every frame it splits out is appended to parse_queue.
At its core this invokes s->parser->parser_parse to parse the packet, which for H.264 ends up in ff_h264_parser.

/**
 * Parse a packet, add all split parts to parse_queue.
 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
 */
static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
{
    AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
    AVStream *st = s->streams[stream_index];
    uint8_t *data = pkt ? pkt->data : NULL;
    int size      = pkt ? pkt->size : 0;
    int ret = 0, got_output = 0;

    if (!pkt) {
        av_init_packet(&flush_pkt);
        pkt        = &flush_pkt;
        got_output = 1;
    }

    while (size > 0 || (pkt == &flush_pkt && got_output)) {
        int len;
        int64_t next_pts = pkt->pts;
        int64_t next_dts = pkt->dts;

        // Reset out_pkt before parsing
        av_init_packet(&out_pkt);

        // Internally calls s->parser->parser_parse, using the stream's specific parser to parse the packet
        len = av_parser_parse2(st->parser, st->internal->avctx,
                               &out_pkt.data, &out_pkt.size, data, size,
                               pkt->pts, pkt->dts, pkt->pos);

        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
        pkt->pos = -1;
        /* increment read pointer */
        data += len;
        size -= len;

        got_output = !!out_pkt.size;

        if (!out_pkt.size)
            continue;

        // ... a series of field assignments (omitted)

        // Decide whether this packet is a key frame
        if (st->parser->key_frame == 1 || (st->parser->key_frame == -1 && st->parser->pict_type == AV_PICTURE_TYPE_I))
            out_pkt.flags |= AV_PKT_FLAG_KEY;

        if (st->parser->key_frame == -1 && st->parser->pict_type == AV_PICTURE_TYPE_NONE && (pkt->flags & AV_PKT_FLAG_KEY))
            out_pkt.flags |= AV_PKT_FLAG_KEY;

        compute_pkt_fields(s, st, st->parser, &out_pkt, next_dts, next_pts);

        // Append to the tail of parse_queue
        ret = add_to_pktbuf(&s->internal->parse_queue, &out_pkt,
                            &s->internal->parse_queue_end, 1);
        av_packet_unref(&out_pkt);
        if (ret < 0)
            goto fail;
    }

    /* end of the stream => close and free the parser */
    if (pkt == &flush_pkt) {
        av_parser_close(st->parser);
        st->parser = NULL;
    }

fail:
    av_packet_unref(pkt);
    return ret;
}

av_parser_parse2

Through s->parser->parser_parse this ends up in the h264_parser.c file, where the data is parsed according to the H.264 format.
A dedicated article on H.264 will follow later; here we just show its parser struct again, followed by a standalone usage sketch.

// h264_parser.c
AVCodecParser ff_h264_parser = {
    .codec_ids      = { AV_CODEC_ID_H264 },
    .priv_data_size = sizeof(H264ParseContext),
    .parser_init    = init,
    .parser_parse   = h264_parse,
    .parser_close   = h264_close,
    .split          = h264_split,
};
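
For completeness, the same parser can also be driven directly through the public API, which mirrors what parse_packet does inside the demuxer. A minimal sketch, assuming a raw Annex-B H.264 bytestream; parse_raw_h264 is just an illustrative name, and the decoder hookup and error handling are omitted:

// Minimal sketch: feed a raw H.264 bytestream through the parser so that
// av_parser_parse2() splits it into packets of exactly one frame each.
static void parse_raw_h264(const uint8_t *data, int size)
{
    AVCodecParserContext *parser = av_parser_init(AV_CODEC_ID_H264);
    const AVCodec  *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    AVPacket *pkt = av_packet_alloc();

    while (size > 0) {
        // The same call parse_packet() makes internally
        int len = av_parser_parse2(parser, avctx, &pkt->data, &pkt->size,
                                   data, size,
                                   AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        data += len;
        size -= len;
        if (pkt->size) {
            // pkt now holds one H.264 frame; send it to the decoder here
        }
    }

    av_parser_close(parser);
    avcodec_free_context(&avctx);
    av_packet_free(&pkt);
}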

Reposted from blog.csdn.net/m0_60259116/article/details/125188548