ffplay播放器剖析(2)----读取线程刨析

1.读取线程流程

1.1 准备工作

  1. avformat_alloc_context创建上下文

  2. ic->interrupt_callback.callback=decode_interrupt_cb 绑定回调函数

  3. avformat_open_input 打开媒体文件

  4. avformat_find_stream_info 读取媒体文件的包获取更多的stream信息

  5. 检测指定播放起始时间,如果seek到指定位置就使用avformat_seek_file

  6. 查找AVStream,将对应的index值记录到st_index[AVMEDIA_TYPE_NB];

    a.根据用户指定来查找流 avformat_match_stream_specifier

    b.使用av_find_best_stream 查找流

  7. 从待处理流中获取相关参数信息,来设置显示窗口的宽度、高度及宽高比.

  8. stream_component_open 打开音频,视频,字幕解码器,并创建相应的解码线程以及进行相应的参数初始化

1.2 For循环读取数据

1.检查是否退出

2.检查是否暂停/继续

3.检查是否需要seek

4.检查video是否为attached_pic

5.检查队列是否有足够的数据

6.检查码流是否播放结束

​ a.是否循环播放

​ b.是否自动退出

7.使用av_read_frame读取数据包

8.检测数据是否读取完毕

9.检查是否在播放范围内

10.将数据放入对应队列中

1.3退出线程处理

1.如果解复用器打开成功则调用avformat_close_input关闭

2.调用SDL_PushEvent发送退出事件FF_QUIT_EVENT

3.释放互斥量wait_mutex

2.准备工作

2.1 调用avformat_alloc_context创建上下文

ic = avformat_alloc_context();
is->ic=ic;//将创建好的ic赋值给VideoState管理

2.2 ic->interrupt_callback

    /* 2.设置中断回调函数,如果出错或者退出,就根据目前程序设置的状态选择继续check或者直接退出 */
    /* 当执行耗时操作时(一般是在执行while或者for循环的数据读取时),会调用interrupt_callback.callback
     * 回调函数中返回1则代表ffmpeg结束耗时操作退出当前函数的调用
     * 回调函数中返回0则代表ffmpeg内部继续执行耗时操作,直到完成既定的任务(比如读取到既定的数据包)
     */
    ic->interrupt_callback.callback = decode_interrupt_cb;
    ic->interrupt_callback.opaque = is;

2.3 avformat_open_input 打开媒体文件/媒体流

int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options);

该函数用于打开输入媒体(可以是文件,也可以是实时流),其中的fmt是可以强制指定解码格式,options是设置参数,设置的参数都会在选择的解码器中使用.

2.4 avformat_find_stream_info

该函数是获取完整流信息的.

为什么通过AVFormatContext可以获取流信息,还需要avformat_find_stream_info函数呢?

因为有些媒体文件缺少部分头信息,导致获取流信息失败,而avformat_find_stream_info函数会读取媒体文件中的部分数据进行分析流信息.

2.5 检测是否指定起始播放时间

如果指定时间则seek到指定位置,使用avformat_seek_file函数.

可以通过 -ss 选项设置,格式为hh:mm:ss

    /* if seeking requested, we execute it */
    /* 5. 检测是否指定播放起始时间 */
    if (start_time != AV_NOPTS_VALUE) {
    
    
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        // seek的指定的位置开始播放
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
    
    
            av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
                   is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

2.6 查找AVStream

一个媒体文件,对应有0~n个音频流、0~n个视频流、0~n个字幕流

具体现在那个流进⾏播放我们有两种策略:

  1. 在播放起始指定对应的流

    {
          
           "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, {
          
           &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
    {
          
           "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, {
          
           &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
    {
          
           "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, {
          
           &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }
    

    -ast是选择音频流

    -vst是选择视频流

    -sst是选择字幕流

  2. 使⽤缺省的流进⾏播放

使用缺省流播放是使用av_find_best_stream函数进行查找流的index,然后赋值给对应的wanted_stream_spec数组

    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                      st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                	  st_index[AVMEDIA_TYPE_AUDIO],
                                      st_index[AVMEDIA_TYPE_VIDEO],
                                      NULL, 0);
    if (!video_disable && !subtitle_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                         st_index[AVMEDIA_TYPE_SUBTITLE],
                                         (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                         st_index[AVMEDIA_TYPE_AUDIO] :
                                         st_index[AVMEDIA_TYPE_VIDEO]),NULL, 0);

2.7 通过AVCodecParameters和av_guess_sample_aspect_ratio计算出显示窗口的宽、高及宽高比

    //7 从待处理流中获取相关参数,设置显示窗口的宽度、高度及宽高比
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
    
    
        AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
        AVCodecParameters *codecpar = st->codecpar;
        //根据流和帧宽高比猜测视频帧的像素宽高比(像素的宽高比,注意不是图像的)
        AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
        if (codecpar->width) {
    
    
            // 设置显示窗口的大小和宽高比
            set_default_window_size(codecpar->width, codecpar->height, sar);
        }
    }

2.8 stream_component_open打开解码线程

 /* 8. 打开视频、音频解码器。在此会打开相应解码器,并创建相应的解码线程。 */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
    
    // 如果有音频流则打开音频流
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret = -1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
    
     // 如果有视频流则打开视频流
        ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    if (is->show_mode == SHOW_MODE_NONE) {
    
    
        //选择怎么显示,如果视频打开成功,就显示视频画面,否则,显示音频对应的频谱图
        is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
    
     // 如果有字幕流则打开字幕流
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

stream_component_open函数:

/*
 * Open the decoder for the stream at stream_index and start its decode thread.
 *
 * Copies the stream's codec parameters into a fresh AVCodecContext, selects a
 * decoder (honouring a user-forced codec name when given), opens it, then
 * stores the stream index / AVStream pointer into VideoState and starts the
 * matching audio/video/subtitle decode thread. For audio it also opens the
 * SDL audio output device.
 *
 * Returns 0 on success or a negative AVERROR on failure; on failure the
 * allocated codec context is freed.
 */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    const char *forced_codec_name = NULL;
    AVDictionary *opts = NULL;
    AVDictionaryEntry *t = NULL;
    int sample_rate, nb_channels;
    int64_t channel_layout;
    int ret = 0;
    int stream_lowres = lowres;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;

    /* Allocate a codec context for the decoder. */
    avctx = avcodec_alloc_context3(NULL);
    if (!avctx)
        return AVERROR(ENOMEM);

    /* Copy the codec parameters carried by the stream into the new context. */
    ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
    if (ret < 0)
        goto fail;
    /* Packet timestamps are expressed in the stream's time base. */
    avctx->pkt_timebase = ic->streams[stream_index]->time_base;

    /* Look up the default decoder for this codec id. */
    codec = avcodec_find_decoder(avctx->codec_id);

    /* forced_codec_name holds audio_codec_name / subtitle_codec_name /
     * video_codec_name, i.e. the decoder the user forced on the command
     * line; when set it overrides the default decoder chosen above. */
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index;
        forced_codec_name =    audio_codec_name; break;
    case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index;
        forced_codec_name = subtitle_codec_name; break;
    case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index;
        forced_codec_name =    video_codec_name; break;
    }
    if (forced_codec_name)
        codec = avcodec_find_decoder_by_name(forced_codec_name);
    if (!codec) {
        if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
                   "No codec could be found with name '%s'\n", forced_codec_name);
        else                   av_log(NULL, AV_LOG_WARNING,
                   "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
        ret = AVERROR(EINVAL);
        goto fail;
    }

    avctx->codec_id = codec->id;
    /* Clamp the requested lowres to what this decoder supports. */
    if (stream_lowres > codec->max_lowres) {
        av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
               codec->max_lowres);
        stream_lowres = codec->max_lowres;
    }
    avctx->lowres = stream_lowres;

    if (fast)
        avctx->flags2 |= AV_CODEC_FLAG2_FAST;

    /* Build the decoder options; default to automatic threading. */
    opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
    if (!av_dict_get(opts, "threads", NULL, 0))
        av_dict_set(&opts, "threads", "auto", 0);
    if (stream_lowres)
        av_dict_set_int(&opts, "lowres", stream_lowres, 0);
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
        av_dict_set(&opts, "refcounted_frames", "1", 0);
    if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
        goto fail;
    }
    /* Any entry left in opts was not consumed by the decoder -> error out. */
    if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret =  AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }

    is->eof = 0;
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
#if CONFIG_AVFILTER
    {
        AVFilterContext *sink;

        is->audio_filter_src.freq           = avctx->sample_rate;
        is->audio_filter_src.channels       = avctx->channels;
        is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
        is->audio_filter_src.fmt            = avctx->sample_fmt;
        if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
            goto fail;
        sink = is->out_audio_filter;
        /* With filters enabled, the output format comes from the sink. */
        sample_rate    = av_buffersink_get_sample_rate(sink);
        nb_channels    = av_buffersink_get_channels(sink);
        channel_layout = av_buffersink_get_channel_layout(sink);
    }
#else
        /* No filters: take the audio format straight from the codec context. */
        sample_rate    = avctx->sample_rate;
        nb_channels    = avctx->channels;
        channel_layout = avctx->channel_layout;
#endif

        /* Prepare audio output: open the SDL audio device. The parameters the
         * device actually granted end up in audio_tgt; the return value is
         * the hardware buffer size. */
        if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
            goto fail;
        is->audio_hw_buf_size = ret;
        is->audio_src = is->audio_tgt;  /* source params start out equal to the output params */
        /* Reset the audio_buf bookkeeping. */
        is->audio_buf_size  = 0;
        is->audio_buf_index = 0;

        /* Init averaging filter, used when audio is not the master clock. */
        is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB); /* ~0.794 */
        is->audio_diff_avg_count = 0;
        /* Since we do not have a precise enough audio FIFO fullness,
         * only correct audio sync when the drift exceeds this threshold. */
        is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;

        is->audio_stream = stream_index;           /* remember the audio stream index */
        is->audio_st = ic->streams[stream_index];  /* and its AVStream pointer */

        /* Init ffplay's decoder wrapper for audio. */
        decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
        if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
            is->auddec.start_pts = is->audio_st->start_time;
            is->auddec.start_pts_tb = is->audio_st->time_base;
        }
        /* Start the audio decode thread and unpause the audio device. */
        if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
            goto out;
        SDL_PauseAudioDevice(audio_dev, 0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;          /* remember the video stream index */
        is->video_st = ic->streams[stream_index]; /* and its AVStream pointer */
        /* Init ffplay's decoder wrapper and start the video decode thread. */
        decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
        if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
            goto out;
        is->queue_attachments_req = 1; /* request attached cover art (mp3/aac), if any */
        break;
    case AVMEDIA_TYPE_SUBTITLE: /* same pattern as the video case */
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];

        decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
        if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
            goto out;
        break;
    default:
        break;
    }
    goto out;

fail:
    avcodec_free_context(&avctx);
out:
    av_dict_free(&opts);

    return ret;
}

将流的编解码器参数拷贝到解码器上下文后,根据其中的codec_id通过avcodec_find_decoder查找解码器,如果用户指定了解码器,就通过avcodec_find_decoder_by_name使用用户指定的解码器,然后调用avcodec_open2打开解码器.

然后接下来就是根据流的类型进行将解码器的参数赋值到VideoState中,然后启动解码线程

3.For循环读取数据

3.1 检查是否退出

        // 1 检测是否退出
        if (is->abort_request)
            break;

3.2 检查是否暂停/继续

        // 2 检测是否暂停/继续
        if (is->paused != is->last_paused) {
    
    
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return = av_read_pause(ic); // 网络流的时候有用
            else
                av_read_play(ic);
        }

其中的av_read_pause和av_read_play是会调用到对应的网络流暂停和继续的函数,如RTSP的

/*
 * Pause an RTSP session.
 *
 * If the session is not currently streaming, this is a no-op returning 0.
 * Otherwise (except for Real servers that require subscriptions) a PAUSE
 * request is sent to the server's control URI; a non-OK status code is
 * converted to an AVERROR. On success the session state becomes
 * RTSP_STATE_PAUSED.
 */
static int rtsp_read_pause(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    RTSPMessageHeader reply1, *reply = &reply1;

    /* Nothing to pause unless we are actively streaming. */
    if (rt->state != RTSP_STATE_STREAMING)
        return 0;
    else if (!(rt->server_type == RTSP_SERVER_REAL && rt->need_subscription)) {
        /* Send PAUSE and check the server's status code. */
        ff_rtsp_send_cmd(s, "PAUSE", rt->control_uri, NULL, reply, NULL);
        if (reply->status_code != RTSP_STATUS_OK) {
            return ff_rtsp_averror(reply->status_code, -1);
        }
    }
    rt->state = RTSP_STATE_PAUSED;
    return 0;
}
/*
 * Start or resume playback of an RTSP session.
 *
 * For UDP transports it first sends dummy punch packets to open up NAT
 * mappings. Unless this is a Real server needing subscriptions, it then
 * resets the RTP demux state for every stream, sends a PLAY request (with a
 * Range header when resuming from a seek rather than from pause), and, when
 * the server returns a range start, records the per-stream range offset.
 * On success the session state becomes RTSP_STATE_STREAMING.
 */
static int rtsp_read_play(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    RTSPMessageHeader reply1, *reply = &reply1;
    int i;
    char cmd[1024];

    av_log(s, AV_LOG_DEBUG, "hello state=%d\n", rt->state);
    rt->nb_byes = 0;

    if (rt->lower_transport == RTSP_LOWER_TRANSPORT_UDP) {
        for (i = 0; i < rt->nb_rtsp_streams; i++) {
            RTSPStream *rtsp_st = rt->rtsp_streams[i];
            /* Try to initialize the connection state in a
             * potential NAT router by sending dummy packets.
             * RTP/RTCP dummy packets are used for RDT, too.
             */
            if (rtsp_st->rtp_handle &&
                !(rt->server_type == RTSP_SERVER_WMS && i > 1))
                ff_rtp_send_punch_packets(rtsp_st->rtp_handle);
        }
    }
    if (!(rt->server_type == RTSP_SERVER_REAL && rt->need_subscription)) {
        if (rt->transport == RTSP_TRANSPORT_RTP) {
            /* Flush queued packets and reset timestamp tracking for every
             * RTP stream before restarting playback. */
            for (i = 0; i < rt->nb_rtsp_streams; i++) {
                RTSPStream *rtsp_st = rt->rtsp_streams[i];
                RTPDemuxContext *rtpctx = rtsp_st->transport_priv;
                if (!rtpctx)
                    continue;
                ff_rtp_reset_packet_queue(rtpctx);
                rtpctx->last_rtcp_ntp_time  = AV_NOPTS_VALUE;
                rtpctx->first_rtcp_ntp_time = AV_NOPTS_VALUE;
                rtpctx->base_timestamp      = 0;
                rtpctx->timestamp           = 0;
                rtpctx->unwrapped_timestamp = 0;
                rtpctx->rtcp_ts_offset      = 0;
            }
        }
        if (rt->state == RTSP_STATE_PAUSED) {
            /* Resuming from pause: no Range header, continue where we were. */
            cmd[0] = 0;
        } else {
            /* Resuming after a seek: request playback from seek_timestamp
             * (converted to npt seconds.milliseconds). */
            snprintf(cmd, sizeof(cmd),
                     "Range: npt=%"PRId64".%03"PRId64"-\r\n",
                     rt->seek_timestamp / AV_TIME_BASE,
                     rt->seek_timestamp / (AV_TIME_BASE / 1000) % 1000);
        }
        ff_rtsp_send_cmd(s, "PLAY", rt->control_uri, cmd, reply, NULL);
        if (reply->status_code != RTSP_STATUS_OK) {
            return ff_rtsp_averror(reply->status_code, -1);
        }
        if (rt->transport == RTSP_TRANSPORT_RTP &&
            reply->range_start != AV_NOPTS_VALUE) {
            /* Server reported the actual range start: convert it into each
             * stream's time base and store it as the range start offset. */
            for (i = 0; i < rt->nb_rtsp_streams; i++) {
                RTSPStream *rtsp_st = rt->rtsp_streams[i];
                RTPDemuxContext *rtpctx = rtsp_st->transport_priv;
                AVStream *st = NULL;
                if (!rtpctx || rtsp_st->stream_index < 0)
                    continue;

                st = s->streams[rtsp_st->stream_index];
                rtpctx->range_start_offset =
                    av_rescale_q(reply->range_start, AV_TIME_BASE_Q,
                                 st->time_base);
            }
        }
    }
    rt->state = RTSP_STATE_STREAMING;
    return 0;
}

如果不是实时流的话就会调用avio_pause函数

/*
 * Pause or resume the protocol behind an AVIOContext.
 * pause != 0 requests a pause, 0 requests a resume.
 * Returns AVERROR(ENOSYS) when the protocol provides no read_pause callback,
 * otherwise the callback's return value.
 */
int avio_pause(AVIOContext *s, int pause)
{
    if (s->read_pause)
        return s->read_pause(s->opaque, pause);
    return AVERROR(ENOSYS);
}

pause为1就暂停,0就继续

3.3检测是否需要seek

//  3 检测是否seek
        if (is->seek_req) {
    
     // 是否有seek请求
            int64_t seek_target = is->seek_pos; // 目标位置
            int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
            // 前进seek seek_rel>0
            //seek_min    = seek_target - is->seek_rel + 2;
            //seek_max    = INT64_MAX;
            // 后退seek seek_rel<0
            //seek_min = INT64_MIN;
            //seek_max = seek_target + |seek_rel| -2;
            //seek_rel =0  鼠标直接seek
            //seek_min = INT64_MIN;
            //seek_max = INT64_MAX;

            // FIXME the +-2 is due to rounding being not done in the correct direction in generation
            //      of the seek_pos/seek_rel variables
            // 修复由于四舍五入,没有再seek_pos/seek_rel变量的正确方向上进行
            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
    
    
                av_log(NULL, AV_LOG_ERROR,
                       "%s: error while seeking\n", is->ic->url);
            } else {
    
    
                /* seek的时候,要把原先的数据清空,并重启解码器,put flush_pkt的目的是告知解码线程需要
                 * reset decoder
                 */
                if (is->audio_stream >= 0) {
    
     // 如果有音频流
                    packet_queue_flush(&is->audioq);    // 清空packet队列数据
                    // 放入flush pkt, 用来开起新的一个播放序列, 解码器读取到flush_pkt也清空解码器
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
    
     // 如果有字幕流
                    packet_queue_flush(&is->subtitleq); // 和上同理
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
    
        // 如果有视频流
                    packet_queue_flush(&is->videoq);    // 和上同理
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
                if (is->seek_flags & AVSEEK_FLAG_BYTE) {
    
    
                    set_clock(&is->extclk, NAN, 0);
                } else {
    
    
                    set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
                }
            }
            is->seek_req = 0;
            is->queue_attachments_req = 1;
            is->eof = 0;        // 细节
            if (is->paused)
                step_to_next_frame(is); // 播放seek后的第一帧
        }

主要就是检测seek_req是否为1,如果为1就要seek一下,调用avformat_seek_file函数即可,然后seek成功就清空一下packet队列,并且把seek_req置为0,如果paused为1,也就是暂停状态,那么我们就要把seek后的第一帧显示出来,然后继续暂停.

3.4 检测video是否为attached_pic

        // 4 检测video是否为attached_pic
        if (is->queue_attachments_req) {
    
    
            // attached_pic 附带的图片。比如说一些MP3,AAC音频文件附带的专辑封面,所以需要注意的是音频文件不一定只存在音频流本身
            if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
    
    
                AVPacket copy = {
    
     0 };
                if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
                    goto fail;
                packet_queue_put(&is->videoq, &copy);
                packet_queue_put_nullpacket(&is->videoq, is->video_stream);
            }
            is->queue_attachments_req = 0;
        }

这个一般出现在MP3这种专辑音频的时候需要一张图片,这种图片保存在video_st->attached_pic中,如果 is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC为真的话就说明有附带图片.

3.5 检测队列是否已经有足够数据

        /* 缓存队列有足够的包,不需要继续读取数据 */
        if (infinite_buffer<1 &&
            (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
             || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
                 stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
                 stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
    
    
            /* wait 10 ms */
            SDL_LockMutex(wait_mutex);
            // 如果没有唤醒则超时10ms退出,比如在seek操作时这里会被唤醒
            SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
            SDL_UnlockMutex(wait_mutex);
            continue;
        }

is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE 这意思是三个队列包总数据大小不能超过MAX_QUEUE_SIZE (15M),这只是一个经验值,如果是播放4K等高码率视频可能就不太够,因为几秒的数据就可能超过15M,缓存的时长就不够了.

stream_has_enough_packets这个函数就判断对应的队列包是否充足

/*
 * Decide whether a stream's packet queue is sufficiently filled so that the
 * read thread can stop fetching for now.
 * True when: the stream is absent, the queue was asked to abort, the stream
 * is an attached picture, or the queue holds more than MIN_FRAMES packets
 * and either has no duration info or buffers more than one second.
 */
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
    if (stream_id < 0)                                  /* stream not present */
        return 1;
    if (queue->abort_request)                           /* shutting down */
        return 1;
    if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)  /* cover art: one packet is all there is */
        return 1;
    if (queue->nb_packets <= MIN_FRAMES)                /* need more than MIN_FRAMES packets */
        return 0;
    /* Enough packets: also require unknown duration or > 1s buffered. */
    return !queue->duration || av_q2d(st->time_base) * queue->duration > 1.0;
}

3.6 检测码流是否已经播放结束

 // 6 检测码流是否已经播放结束
        if (!is->paused // 非暂停
            && // 这里的执行是因为码流读取完毕后 插入空包所致
            (!is->audio_st // 没有音频流
             || (is->auddec.finished == is->audioq.serial // 或者音频播放完毕
                 && frame_queue_nb_remaining(&is->sampq) == 0))
            && (!is->video_st // 没有视频流
                || (is->viddec.finished == is->videoq.serial // 或者视频播放完毕
                    && frame_queue_nb_remaining(&is->pictq) == 0))) {
    
    
            if (loop != 1           // a 是否循环播放
                && (!loop || --loop)) {
    
    
                // stream_seek不是ffmpeg的函数,是ffplay封装的,每次seek的时候会调用
                stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
            } else if (autoexit) {
    
      // b 是否自动退出
                ret = AVERROR_EOF;
                goto fail;
            }
        }

检测是否播放结束的流程就是检测没有视频流或者解码已解完所有packet,自定义的解码器(decoder)serial等于 PacketQueue的serial(也就是packetQueue全部解码结束),并且FrameQueue中没有数据帧.

音频流亦是如此,并且视频流和音频流都播放完毕才会进入该判断内部,进入之后会判断是否循环和循环次数,判断失败就自动退出

3.7 使用av_read_frame读取数据包

        // 7.读取媒体数据,得到的是音视频分离后、解码前的数据
        ret = av_read_frame(ic, pkt); // 调用不会释放pkt的数据,需要我们自己去释放packet的数据

3.8 检测数据是否读取完毕

        // 8 检测数据是否读取完毕
        if (ret < 0) {
    
    
            if ((ret == AVERROR_EOF || avio_feof(ic->pb))
                && !is->eof)
            {
    
    
                // 插入空包说明码流数据读取完毕了,之前讲解码的时候说过刷空包是为了从解码器把所有帧都读出来
                if (is->video_stream >= 0)
                    packet_queue_put_nullpacket(&is->videoq, is->video_stream);
                if (is->audio_stream >= 0)
                    packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
                if (is->subtitle_stream >= 0)
                    packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
                is->eof = 1;        // 文件读取完毕
            }
            if (ic->pb && ic->pb->error)
                break;
            SDL_LockMutex(wait_mutex);
            SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
            SDL_UnlockMutex(wait_mutex);
            continue;		// 继续循环 保证线程不退出
        } else {
    
    
            is->eof = 0;
        }

数据读取完毕后,往对应的音频、视频、字幕队列插入“空包”,以通知解码器冲刷buffer,将缓存的所有数据都解码成frame并取出来。

3.9 检测是否在播放范围内

  // 9 检测是否在播放范围内
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        stream_start_time = ic->streams[pkt->stream_index]->start_time; // 获取流的起始时间
        pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts; // 获取packet的时间戳
        // 这里的duration是在命令行时用来指定播放长度
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                            (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
                                        av_q2d(ic->streams[pkt->stream_index]->time_base) -
                                    (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
                                <= ((double)duration / 1000000);

stream_start_time是获取流的起始时间,pkt_ts是当前pkt的pts,pkt_in_play_range就是判断该pkt是否在播放时间范围内

(pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
                                        av_q2d(ic->streams[pkt->stream_index]->time_base)

这部分就是计算当前pkt处于的时间

(double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000

这个是-ss设置的起始时间

然后相减得到的结果就是时间范围

让其和duration进行比较即可

3.10 到这步才将数据插入对应的队列

        // 10 将音视频数据分别送入相应的queue中
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
    
    
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
                   && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
    
    
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
    
    
            packet_queue_put(&is->subtitleq, pkt);
        } else {
    
    
            av_packet_unref(pkt);// // 不入队列则直接释放数据
        }
    }

4. 退出线程

    if (ic && !is->ic)
        avformat_close_input(&ic);

    if (ret != 0) {
    
    
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    SDL_DestroyMutex(wait_mutex);

发送FF_QUIT_EVENT事件给SDL,SDL会读取然后退出.

猜你喜欢

转载自blog.csdn.net/m0_60565784/article/details/131773898