ffplay学习之VideoState(三)

夜色已深,水一水博客。

一、ffplay的其它数据结构

存储有关于音频的参数

/* Audio parameter set: describes one concrete PCM configuration
 * (used by ffplay for both the source stream and the SDL output target). */
typedef struct AudioParams {
    
    
    int freq;                   // sample rate in Hz
    AVChannelLayout ch_layout;  // channel layout (not just a channel count: also encodes channel order/positions)
    enum AVSampleFormat fmt;   // sample format; may be interleaved (LRLRLR...) or planar (LLL...RRR...)
    int frame_size;            // size in bytes of one sample unit (one sample across all channels)
    int bytes_per_sec;         // bytes per second of audio data,
                               // e.g. 48 kHz * 2 channels * (16 bit / 8) = 48000 * 2 * 2 = 192000 B/s
} AudioParams;

时钟的结构。ffplay有三个时钟,分别是音频、视频、外部时钟,播放音视频流时会做时间同步,保证音画一致。

/* One of ffplay's three clocks (audio / video / external), used for A/V sync. */
typedef struct Clock {
    
    
    double pts;           /* clock base */
    double pts_drift;     /* clock base minus time at which we updated the clock */
    double last_updated;  /* wall-clock time of the last update */
    double speed;         /* playback speed factor */
    int serial;           /* clock is based on a packet with this serial */
    int paused;           /* non-zero while the clock is paused */
    int *queue_serial;    /* pointer to the current packet queue serial, used for obsolete clock detection */
} Clock;

解码器参数封装

/* Per-stream decoder wrapper: packet source, codec context and decode thread. */
typedef struct Decoder {
    
    
    AVPacket *pkt;           // scratch packet used when feeding the decoder
    PacketQueue *queue;      // input packet queue this decoder drains
    AVCodecContext *avctx;   // decoder context
    int pkt_serial;          // serial of the packet currently being decoded
    int finished;            // NOTE(review): original comment said "whether the decoder is working";
                             // in upstream ffplay this is 0 while decoding and is set to the queue
                             // serial once the stream is fully drained (EOF) — confirm against ffplay.c
    int packet_pending;      // NOTE(review): original comment said "decoder state 0 abnormal / 1 normal";
                             // in upstream ffplay this flags a packet that could not be submitted yet
                             // and must be re-sent on the next iteration — confirm against ffplay.c
    SDL_cond *empty_queue_cond; // signaled when the queue is empty, to wake the read thread to fetch more data
    int64_t start_pts;       // pts to start decoding from
    AVRational start_pts_tb; // time base of start_pts
    int64_t next_pts;        // predicted pts of the next frame (when frames lack pts)
    AVRational next_pts_tb;  // time base of next_pts
    SDL_Thread *decoder_tid; // decode thread handle
} Decoder;

二、VideoState ffplay的管理者
VideoState 是整个ffplay的核心管理者,所有资源的申请和释放以及线程的状态变化都是由其管理。VideoState 在main函数中被创建,整个运行周期都有它的身影。

/* Central state of ffplay: owns all queues, clocks, decoders and threads.
 * Created in main() and alive for the whole player lifetime. */
typedef struct VideoState {
    
    
    SDL_Thread *read_tid;   // demux/read thread
    const AVInputFormat *iformat;  // input format, used when demuxing
    int abort_request;   // set to request that playback stop/exit
    int force_refresh;   // set to force a redraw of the display
    int paused;          // pause state
    int last_paused;     // previous pause state (detects pause/resume transitions)
    int queue_attachments_req; // request to queue attached pictures (e.g. cover art) — presumably; confirm
    int seek_req;        // a seek has been requested — presumably; confirm
    int seek_flags;      // AVSEEK_* flags for the pending seek
    int64_t seek_pos;    // seek target position
    int64_t seek_rel;    // seek offset relative to current position
    int read_pause_return; // return value of av_read_pause() — presumably; confirm
    AVFormatContext *ic;   // demuxer context
    int realtime;          // non-zero for realtime sources (streams)

    Clock audclk;    // audio clock
    Clock vidclk;    // video clock
    Clock extclk;    // external clock

    FrameQueue pictq;  // decoded video picture queue
    FrameQueue subpq;  // decoded subtitle queue
    FrameQueue sampq;  // decoded audio sample queue

    Decoder auddec;  // audio decoder
    Decoder viddec;  // video decoder
    Decoder subdec;  // subtitle decoder

    int audio_stream;  // index of the audio stream

    int av_sync_type;  // master clock selection (audio / video / external)

    double audio_clock;       // current audio clock value
    int audio_clock_serial;   // serial associated with audio_clock
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;       // audio stream
    PacketQueue audioq;       // audio packet queue
    int audio_hw_buf_size;    // size of the SDL audio hardware buffer
    uint8_t *audio_buf;       // buffer currently being played
    uint8_t *audio_buf1;      // resample output buffer
    unsigned int audio_buf_size; /* in bytes */
    unsigned int audio_buf1_size;
    int audio_buf_index; /* in bytes */
    int audio_write_buf_size;  // bytes left to copy to the hardware buffer
    int audio_volume;          // playback volume
    int muted;                 // non-zero when muted
    struct AudioParams audio_src;  // parameters of the decoded audio
#if CONFIG_AVFILTER
    struct AudioParams audio_filter_src;  // parameters entering the audio filter chain
#endif
    struct AudioParams audio_tgt;  // parameters expected by the audio device
    struct SwrContext *swr_ctx;    // resampler converting audio_src -> audio_tgt
    int frame_drops_early;  // frames dropped before decoding/display — presumably before queuing; confirm
    int frame_drops_late;   // frames dropped at display time

    /* how the stream is visualized: video frames, waveform, or spectrum */
    enum ShowMode {
    
    
        SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
    } show_mode;
    int16_t sample_array[SAMPLE_ARRAY_SIZE];  // recent audio samples, for waveform display
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;       // FFT context for spectrum display
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;
    double last_vis_time;
    SDL_Texture *vis_texture;   // visualization (waves/spectrum) texture
    SDL_Texture *sub_texture;   // subtitle display texture
    SDL_Texture *vid_texture;   // video display texture

    int subtitle_stream;     // index of the subtitle stream
    AVStream *subtitle_st;   // subtitle stream
    PacketQueue subtitleq;   // subtitle packet queue

    double frame_timer;                // time at which the current frame should be displayed
    double frame_last_returned_time;
    double frame_last_filter_delay;
    int video_stream;        // index of the video stream
    AVStream *video_st;      // video stream
    PacketQueue videoq;      // video packet queue
    double max_frame_duration;      // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
    struct SwsContext *img_convert_ctx;  // scaler/converter for video frames
    struct SwsContext *sub_convert_ctx;  // scaler/converter for subtitles
    int eof;                 // end of input reached

    char *filename;          // input file name/URL
    int width, height, xleft, ytop;  // display window geometry
    int step;                // frame-step mode (advance one frame while paused)

#if CONFIG_AVFILTER
    int vfilter_idx;
    AVFilterContext *in_video_filter;   // the first filter in the video chain
    AVFilterContext *out_video_filter;  // the last filter in the video chain
    AVFilterContext *in_audio_filter;   // the first filter in the audio chain
    AVFilterContext *out_audio_filter;  // the last filter in the audio chain
    AVFilterGraph *agraph;              // audio filter graph
#endif

    int last_video_stream, last_audio_stream, last_subtitle_stream;  // remembered for stream cycling

    SDL_cond *continue_read_thread;  // signaled to wake the read thread
} VideoState;

猜你喜欢

转载自blog.csdn.net/weixin_42764231/article/details/127622047