FFMPEG plays RTSP video stream

Function introduction:

Qt + FFmpeg are used to implement basic playback of an RTSP video stream. After clicking the button, the RTSP stream at the specified address is pulled, decoded, and displayed through a QLabel in the Qt interface.

Development environment:

    System environment: Ubuntu
    QT: 5.12.12
    FFmpeg: 4.4 (currently the latest)

 

Reference Code:

FFMPEG official example: FFmpeg: decode_video.c
Detailed introduction:
(1) Add library file

Create a new QT project and add the paths to the lib directory and include directory in pro.
 

##ffmpeg
FFMPEG_LIB = /usr/local/ffmpeg/lib
FFMPEG_INCLUDE = /usr/local/ffmpeg/include
 
INCLUDEPATH += $$FFMPEG_INCLUDE
 
LIBS += $$FFMPEG_LIB/libavcodec.so \
        $$FFMPEG_LIB/libavdevice.so \
        $$FFMPEG_LIB/libavfilter.so \
        $$FFMPEG_LIB/libavformat.so \
        $$FFMPEG_LIB/libavutil.so \
        $$FFMPEG_LIB/libswresample.so \
        $$FFMPEG_LIB/libswscale.so
(2) Interface configuration

In MainWindow.ui, add a QPushButton and a QLabel control. Then right-click the button, choose "Go to slot...", and generate the on_pushButton_clicked() slot.

(3) Delay function
#include <QTime>
#include <QElapsedTimer>
// Delay for `msec` milliseconds without blocking the Qt event loop.
// QElapsedTimer is monotonic; comparing QTime::currentTime() (as a naive
// implementation would) wraps at midnight and could stall for up to 24 hours.
void Delay(int msec)
{
    QElapsedTimer timer;
    timer.start();
    while (timer.elapsed() < msec) {
        // Keep dispatching pending events so the UI stays responsive,
        // spending at most 100 ms per iteration inside the event loop.
        QCoreApplication::processEvents(QEventLoop::AllEvents, 100);
    }
}
(4) FFmpeg video decoding

The overall processing consists of 8 steps, described below.

1. Define relevant variables

    AVFormatContext *pFormatCtx = NULL;
    AVCodecContext *pCodecCtx = NULL;
    const AVCodec *pCodec = NULL;
    AVFrame *pFrame,*pFrameRGB;
    AVPacket *packet;
    struct SwsContext *img_convert_ctx;
 
    unsigned char *out_buffer;
    int i,videoIndex;
    int ret;
    char errors[1024] = "";
 
    //rtsp地址:
    char url[] = "rtsp://192.168.111.60:554/LiveMedia/ch1/Media1";

2. Initialize related modules

    //Initialize FFmpeg: allocate the demuxer (format) context used by the calls below
    pFormatCtx = avformat_alloc_context();  //init FormatContext
    //Initialize FFmpeg's network layer (required before opening network streams such as RTSP)
    avformat_network_init();    //init FFmpeg network

3. Open video files and get video information

    //Open the media stream (blocks until the RTSP session is established or fails)
    ret = avformat_open_input(&pFormatCtx,url,NULL,NULL);
    if(ret != 0){
        av_strerror(ret,errors,sizeof(errors));
        cout <<"Failed to open video: ["<< ret << "]"<< errors << endl;
        exit(ret);
    }
 
    //Read a few packets to discover the stream layout (codecs, resolution, ...).
    //avformat_find_stream_info() returns >= 0 on success, so the check must be
    //`ret < 0` — the previous `ret != 0` treated a positive success value as failure.
    ret = avformat_find_stream_info(pFormatCtx,NULL);
    if(ret < 0){
        av_strerror(ret,errors,sizeof(errors));
        cout <<"Failed to get stream info: ["<< ret << "]"<< errors << endl;
        exit(ret);
    }

4. Find streaming information in videos

    //循环查找视频中包含的流信息,直到找到视频类型的流
    //便将其记录下来 videoIndex
    //这里我们现在只处理视频流  音频流先不管他
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoIndex = i;
        }
    }
 
    //如果videoIndex为-1 说明没有找到视频流
    if (videoIndex == -1) {
        printf("Didn't find a video stream.\n");
        return;
    }

5. Configure encoding context, AVCodecContext content

    //Set up the decoder context (AVCodecContext)
    //1. Find a decoder matching the stream's codec id
    pCodec = avcodec_find_decoder(pFormatCtx->streams[videoIndex]->codecpar->codec_id);
    if(pCodec == NULL){
        //avcodec_find_decoder() returns NULL for unsupported codecs;
        //dereferencing it later would crash
        cout <<"Unsupported codec: no decoder found."<< endl;
        exit(1);
    }
    //2. Allocate the decoder context
    pCodecCtx = avcodec_alloc_context3(pCodec);
    //3. Copy the stream parameters (resolution, pixel format, extradata, ...) into the context
    ret = avcodec_parameters_to_context(pCodecCtx,pFormatCtx->streams[videoIndex]->codecpar);
    if(ret < 0){
        av_strerror(ret,errors,sizeof(errors));
        cout <<"Failed to copy codec parameters: ["<< ret << "]"<< errors << endl;
        exit(ret);
    }
    //4. Open the decoder
    ret = avcodec_open2(pCodecCtx, pCodec, NULL);
    if(ret != 0){
        av_strerror(ret,errors,sizeof(errors));
        cout <<"Failed to open Codec Context: ["<< ret << "]"<< errors << endl;
        exit(ret);
    }

6. Create video frames and configure related parameters

    //Allocate the decode-target frame and the RGB frame used for display
    pFrame = av_frame_alloc();
    pFrameRGB = av_frame_alloc();
    //Allocate the pixel buffer backing pFrameRGB (RGB32, 1-byte row alignment)
    out_buffer = (unsigned char*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_RGB32,pCodecCtx->width,pCodecCtx->height,1));
    //Point pFrameRGB's data[]/linesize[] at out_buffer (the frame does NOT take ownership)
    av_image_fill_arrays(pFrameRGB->data,pFrameRGB->linesize, out_buffer,
                   AV_PIX_FMT_RGB32,pCodecCtx->width, pCodecCtx->height,1);
    //Allocate AND initialize the packet. The previous raw
    //`av_malloc(sizeof(AVPacket))` left every field uninitialized,
    //which is undefined behaviour when the packet is first used.
    packet = av_packet_alloc();
    //Print media information to stderr (debug aid)
    av_dump_format(pFormatCtx,0,url,0);
    //Create a SwsContext converting decoded frames to RGB32 at the same resolution
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
                AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);

7. Through the while loop, process each video frame and render it to the Label

    //Pull packets until av_read_frame() reports EOF or an error
    while(av_read_frame(pFormatCtx,packet) >=0){
        //Only decode packets that belong to the selected video stream
        if(packet->stream_index == videoIndex){
            //Feed the packet to the decoder
            ret = avcodec_send_packet(pCodecCtx, packet);
            if(ret < 0){
                av_strerror(ret,errors,sizeof(errors));
                cout <<"Failed to send packet: ["<< ret << "]"<< errors << endl;
            }
            //One packet can yield zero or several frames, so drain in a loop.
            //AVERROR(EAGAIN) just means "send more input" and is NOT an error —
            //the previous code logged it as a decode failure on every packet.
            while(ret >= 0){
                ret = avcodec_receive_frame(pCodecCtx, pFrame);
                if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
                    break;
                }
                if(ret < 0){
                    av_strerror(ret,errors,sizeof(errors));
                    cout <<"Failed to decode video frame: ["<< ret << "]"<< errors << endl;
                    break;
                }
                //Convert the decoded frame to RGB32
                sws_scale(img_convert_ctx,
                                        (const unsigned char* const*) pFrame->data,
                                        pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data,
                                        pFrameRGB->linesize);
                //Pass the row stride explicitly so the QImage stays correct
                //even if the RGB buffer rows carry padding
                QImage img((uchar*)pFrameRGB->data[0],pCodecCtx->width,pCodecCtx->height,pFrameRGB->linesize[0],QImage::Format_RGB32);
                ui->label->setPixmap(QPixmap::fromImage(img));
                //Small delay so the UI gets a chance to repaint between frames
                Delay(1);
            }
        }
        //Drop the packet's payload before reading the next one
        av_packet_unref(packet);
    }

8. Release resources after completion

    //Close and release every resource, roughly in reverse order of creation
    sws_freeContext(img_convert_ctx);
    av_packet_free(&packet);            //was leaked before; also unrefs any remaining payload
    av_frame_free(&pFrame);             //was leaked before
    av_frame_free(&pFrameRGB);          //av_frame_free (not av_free) releases the frame properly
    av_free(out_buffer);                //pFrameRGB only pointed at this buffer, it never owned it
    avcodec_free_context(&pCodecCtx);   //subsumes the deprecated avcodec_close()
    avformat_close_input(&pFormatCtx);
    exit(0);    //original behaviour: quit the application once playback ends

Related reading

Origin blog.csdn.net/xiehuanbin/article/details/133127539