ijkplayer source code analysis: the video decoding process

Preface

This article is the fifth part of the process analysis. It analyzes the video decoding process in ijkplayer, which runs in video_thread, as shown in the flowchart below.

[Flowchart: the video decoding flow in video_thread]

IJKFF_Pipenode structure

Defined in ff_ffpipenode.h and ff_ffpipenode.c

ffpipenode represents the video decoder; it encapsulates both software and hardware decoding.

// ff_ffpipenode.h
typedef struct IJKFF_Pipenode_Opaque IJKFF_Pipenode_Opaque;
typedef struct IJKFF_Pipenode IJKFF_Pipenode;
struct IJKFF_Pipenode {
    SDL_mutex *mutex;
    void *opaque;

    void (*func_destroy) (IJKFF_Pipenode *node);
    int  (*func_run_sync)(IJKFF_Pipenode *node);
    int  (*func_flush)   (IJKFF_Pipenode *node); // optional
};

IJKFF_Pipenode *ffpipenode_alloc(size_t opaque_size);
void ffpipenode_free(IJKFF_Pipenode *node);
void ffpipenode_free_p(IJKFF_Pipenode **node);

int  ffpipenode_run_sync(IJKFF_Pipenode *node);
int  ffpipenode_flush(IJKFF_Pipenode *node);
  • Pipenode initialization: ffpipeline_open_video_decoder is called from stream_component_open to create the node.
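
ffpipenode_run_sync and ffpipenode_flush are thin dispatchers to the function pointers that the concrete decoder fills in. A minimal sketch of the dispatch (simplified; the real ff_ffpipenode.c adds null checks):

// ff_ffpipenode.c (simplified sketch)
int ffpipenode_run_sync(IJKFF_Pipenode *node)
{
    // blocks until decoding finishes or is aborted; the software and
    // hardware decoders each install their own func_run_sync
    return node->func_run_sync(node);
}

int ffpipenode_flush(IJKFF_Pipenode *node)
{
    // func_flush is optional, so it may be unset
    if (!node->func_flush)
        return 0;
    return node->func_flush(node);
}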

video_thread call process

Video frames are decoded in the video_thread thread: video_thread reads video packets from the packet_queue and, after software or hardware decoding, puts the decoded frames into the frame_queue via queue_picture.

// ffpipeline_android.c
static IJKFF_Pipenode *func_open_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
    IJKFF_Pipenode        *node = NULL;

    if (ffp->mediacodec_all_videos || ffp->mediacodec_avc || ffp->mediacodec_hevc || ffp->mediacodec_mpeg2)
        // hardware decoding (MediaCodec)
        node = ffpipenode_create_video_decoder_from_android_mediacodec(ffp, pipeline, opaque->weak_vout);

    if (!node) {
        // fall back to software decoding if the hardware decoder could not be created
        node = ffpipenode_create_video_decoder_from_ffplay(ffp);
    }

    return node;
}

// ff_ffplay.c
static int stream_component_open(FFPlayer *ffp, int stream_index) {
    // ...
    decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
    // create the IJKFF_Pipenode and initialize the decoder;
    // ffpipenode encapsulates the hardware/software decoder
    ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);
    decoder_start(&is->viddec, video_thread, ffp, "ff_video_dec");
}

static int video_thread(void *arg) {
    // ...
    ret = ffpipenode_run_sync(ffp->node_vdec); // dispatches to the node's func_run_sync
    return ret;
}

// ff_ffpipeline.c
IJKFF_Pipenode* ffpipeline_open_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    live_log(ffp->inject_opaque, NULL);
    return pipeline->func_open_video_decoder(pipeline, ffp);
}

How video frames are decoded and enqueued

The path taken is decided inside ffpipenode_run_sync, which video_thread calls. Video decoding splits into software decoding and hardware decoding; here is a quick pass over both, with detailed analysis to follow.
The software decoder is implemented in ffpipenode_ffplay_vdec.c, and
the hardware decoder in ffpipenode_android_mediacodec_vdec.c.
After decoding, both hand the frame to queue_picture in ff_ffplay.c to enqueue it.

I will analyze the software and hardware decoders in separate follow-up articles, but the code is easy to follow; the software decoding flow is similar to audio decoding.

// Software decoding, ffpipenode_ffplay_vdec.c
static int ffplay_video_thread(void *arg) {
    // ...
    AVFrame *frame = av_frame_alloc();
    for (;;) {
        // ...
        ret = get_video_frame(ffp, frame); // software-decode one frame via avcodec_receive_frame
        queue_picture(ffp, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
    }
}
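
get_video_frame is built on FFmpeg's send/receive decoding API. The sketch below shows the underlying avcodec_send_packet / avcodec_receive_frame pattern in isolation; it is an illustration of the API, not ijkplayer's exact code, which adds serial checks, flushing and pts handling via decoder_decode_frame:

#include <libavcodec/avcodec.h>

// Hypothetical helper illustrating FFmpeg's decode API
static int decode_one_frame(AVCodecContext *avctx, AVPacket *pkt, AVFrame *frame)
{
    // feed one compressed packet to the decoder
    int ret = avcodec_send_packet(avctx, pkt);
    if (ret < 0 && ret != AVERROR(EAGAIN))
        return ret;

    // try to pull one decoded frame out
    ret = avcodec_receive_frame(avctx, frame);
    if (ret == AVERROR(EAGAIN))
        return 0;   // decoder needs more input first
    if (ret < 0)
        return ret; // error or EOF
    return 1;       // got a frame
}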

// Hardware decoding, ffpipenode_android_mediacodec_vdec.c
static int func_run_sync(IJKFF_Pipenode *node) {
    // ...
    int got_frame = 0;
    while (!q->abort_request) {
        // ...
        drain_output_buffer(env, node, timeUs, &dequeue_count, frame, &got_frame);
        if (got_frame) {
            // ...
            // reaches queue_picture through the wrapper exported in the header
            ffp_queue_picture(ffp, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
        }
    }
}
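
queue_picture is static to ff_ffplay.c, so the MediaCodec decoder reaches it through an exported wrapper declared in the header. Presumably it is little more than a pass-through, along these lines (sketch):

// ff_ffplay.c (sketch of the wrapper)
int ffp_queue_picture(FFPlayer *ffp, AVFrame *frame, double pts,
                      double duration, int64_t pos, int serial)
{
    return queue_picture(ffp, frame, pts, duration, pos, serial);
}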

// Enqueueing, ff_ffplay.c
static int
queue_picture(FFPlayer *ffp, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial) {
    // ...
    Frame *vp;
    vp = frame_queue_peek_writable(&is->pictq); // get a writable node
    if (!vp->bmp) {
        // ...
        alloc_picture(ffp, src_frame->format); // create the bmp (overlay)

        vp->allocated = 0;
        vp->width = src_frame->width;
        vp->height = src_frame->height;
        vp->format = src_frame->format;
    }
    if (vp->bmp) {
        SDL_VoutLockYUVOverlay(vp->bmp); // lock the overlay
        SDL_VoutFillFrameYUVOverlay(vp->bmp, src_frame);  // calls func_fill_frame to "draw" the frame onto the final display layer
        SDL_VoutUnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->duration = duration;
        vp->pos = pos;
        vp->serial = serial;
        vp->sar = src_frame->sample_aspect_ratio;
        vp->bmp->sar_num = vp->sar.num;
        vp->bmp->sar_den = vp->sar.den;

        frame_queue_push(&is->pictq); // after writing the node, frame_queue_push tells the FrameQueue to commit it
    }
}
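
frame_queue_peek_writable and frame_queue_push come from ffplay's FrameQueue: a bounded ring buffer guarded by a mutex and condition variable. A condensed sketch of the writer side (abort/error handling trimmed):

// Condensed from ffplay's FrameQueue (writer side)
static Frame *frame_queue_peek_writable(FrameQueue *f)
{
    SDL_LockMutex(f->mutex);
    // block while the queue is full and playback has not been aborted
    while (f->size >= f->max_size && !f->pktq->abort_request)
        SDL_CondWait(f->cond, f->mutex);
    SDL_UnlockMutex(f->mutex);

    if (f->pktq->abort_request)
        return NULL;
    return &f->queue[f->windex]; // the next writable slot
}

static void frame_queue_push(FrameQueue *f)
{
    if (++f->windex == f->max_size)
        f->windex = 0;           // wrap the write index
    SDL_LockMutex(f->mutex);
    f->size++;                   // publish the frame
    SDL_CondSignal(f->cond);     // wake a waiting reader
    SDL_UnlockMutex(f->mutex);
}

The decoder thread blocks in frame_queue_peek_writable when the queue is full, which is what throttles decoding to the rendering speed.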

How video frames are read and rendered: video_refresh_thread

A video_refresh_thread is created in the stream_open method; it reads video frames from the frame_queue, performs audio/video synchronization, and then renders them.
Audio/video synchronization is ignored here; let's go straight to the rendering process.

static VideoState *stream_open(FFPlayer *ffp, const char *filename, AVInputFormat *iformat) {
    // ...
    // create video_refresh_thread: a dedicated thread for video rendering
    SDL_CreateThreadEx(&is->_video_refresh_tid, video_refresh_thread, ffp, "ff_vout");
}

#define REFRESH_RATE 0.01

static int video_refresh_thread(void *arg) {
    FFPlayer *ffp = arg;
    VideoState *is = ffp->is;
    double remaining_time = 0.0;
    while (!is->abort_request) {
        if (remaining_time > 0.0) {
            // video_refresh does the A/V sync and updates remaining_time;
            // sleep here for that long
            av_usleep((int) (int64_t) (remaining_time * 1000000.0));
        }
        remaining_time = REFRESH_RATE;
        if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
            video_refresh(ffp, &remaining_time);
    }

    return 0;
}

static void video_refresh(FFPlayer *opaque, double *remaining_time) {
    // ...
    if (!is->video_st) {
        // ...
        return;
    }

    if (frame_queue_nb_remaining(&is->pictq) == 0) {
        // no frame in the queue, do nothing
    } else {
        // ... A/V sync is ignored here; just render the next frame
        frame_queue_next(&is->pictq); // advance to the next frame
        is->force_refresh = 1;
    }

    if (!ffp->display_disable
        && is->force_refresh
        && is->show_mode == SHOW_MODE_VIDEO
        && is->pictq.rindex_shown) {
        video_display2(ffp); // display the frame
    }
}
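
frame_queue_next is the reader-side counterpart of frame_queue_push above: it releases the frame that was just shown and advances the read index. A condensed sketch (this is also where rindex_shown, tested above, gets set):

// Condensed from ffplay's FrameQueue (reader side)
static void frame_queue_next(FrameQueue *f)
{
    if (f->keep_last && !f->rindex_shown) {
        f->rindex_shown = 1;     // first call only marks the frame as shown
        return;
    }
    frame_queue_unref_item(&f->queue[f->rindex]); // release the shown frame
    if (++f->rindex == f->max_size)
        f->rindex = 0;           // wrap the read index
    SDL_LockMutex(f->mutex);
    f->size--;                   // one fewer queued frame
    SDL_CondSignal(f->cond);     // wake a blocked decoder thread
    SDL_UnlockMutex(f->mutex);
}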

static void video_display2(FFPlayer *ffp) {
    VideoState *is = ffp->is;
    if (is->video_st)
        video_image_display2(ffp);
}

static void video_image_display2(FFPlayer *ffp) {
    // ...
    Frame *vp = frame_queue_peek_last(&is->pictq); // the frame to render

    if (vp->bmp) {
        // render it
        SDL_VoutDisplayYUVOverlay(ffp->vout, vp->bmp);
    }
}
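
SDL_VoutDisplayYUVOverlay itself is only a dispatch into the platform vout in ijksdl; on Android the actual drawing happens in the vout's display_overlay implementation (ANativeWindow or OpenGL ES). A sketch of the dispatch, assuming the usual locking in ijksdl_vout.c:

// ijksdl_vout.c (sketch)
int SDL_VoutDisplayYUVOverlay(SDL_Vout *vout, SDL_VoutOverlay *overlay)
{
    if (!vout || !vout->display_overlay)
        return -1;

    SDL_LockMutex(vout->mutex);
    int retval = vout->display_overlay(vout, overlay);
    SDL_UnlockMutex(vout->mutex);
    return retval;
}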

