ffmpeg 开源视、音频解码

1、编译生成ffmpeg 库
2、引用

extern "C"
{
// FFmpeg is a plain C library: the extern "C" wrapper prevents the C++
// compiler from name-mangling the FFmpeg symbols, so they link correctly.
#include "libavcodec/avcodec.h"       // codec discovery and decoding
#include "libavformat/avformat.h"     // container demuxing and network I/O
#include "libswscale/swscale.h"       // pixel-format conversion / scaling
#include "libswresample/swresample.h" // audio resampling
#include "libavutil/common.h"
#include "libavutil/mem.h"            // av_malloc / av_free
#include "libavutil/opt.h"            // AVDictionary / option helpers
}
#include "opencv.hpp"
#include <string>
using namespace std;

3、调用

//初始化libavformat 和注册所有的组合和分离器与协议
av_register_all();
//数据来自于网络,一定要调用下面的初始化方法
avformat_network_init();
/初始化中间参数对象,用于过程中的数据传递
m_PFmtctx = avformat_alloc_context();
//连接参数设置
AVDictionary *options=NULL;
int result = av_dict_set(&options, "rtsp_transport", "tcp", 0);
if (result < 0)
    printf("Set rtsp transport tcp failed!\n"); 
av_dict_set(&options, "stimeout", "2000000", 0);//timout set , us
//使用设置的连接参数建立网络连接
if((res = avformat_open_input(&m_PFmtctx, url.c_str(), NULL, &options)) < 0){
     m_strError = "open input file failed !";
     res = AVERROR_INVALIDDATA;
     flag = S_FALSE;
     break;
}
//赋值流结构体,并且打开和初始化解码器
if((res = avformat_find_stream_info(m_PFmtctx,NULL)) < 0){
      m_strError = "find stream info failed !";
      flag = S_FALSE;
      break;
}

m_VideoStream = m_AudioStream = -1;
// Locate the index of the first video stream. Note this reuses the outer
// `res` variable as the loop index; m_VideoStream stays -1 if none found.
for(res = 0; res < m_PFmtctx->nb_streams; res++){
    if(m_PFmtctx->streams[res]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
         m_VideoStream = res;
        break;
     }
}

// Look up and open the decoder for the selected video stream.
if(m_VideoStream != -1)
{
     // NOTE(review): stream->codec is the pre-FFmpeg-3.1 API; newer code
     // would use codecpar + avcodec_parameters_to_context.
     m_PCodctx = m_PFmtctx->streams[m_VideoStream]->codec;
     m_PCodec = avcodec_find_decoder(m_PCodctx->codec_id);
     if(m_PCodec == NULL){
        m_strError = "can not find video decoder !";
        flag = S_FALSE;
        break;
     }
     else
         m_DecodeInfo.m_videoCodeName = m_PCodec->name;

     // Reject still-image / text "codecs": a single PNG/BMP/MJPEG/TIFF/GIF/
     // ANSI picture is not treated as a genuine video stream by this player.
     if (m_PCodctx->codec_id == AV_CODEC_ID_PNG || \
         m_PCodctx->codec_id == AV_CODEC_ID_BMP || \
         m_PCodctx->codec_id == AV_CODEC_ID_MJPEG || \
         m_PCodctx->codec_id == AV_CODEC_ID_TIFF || \
         m_PCodctx->codec_id == AV_CODEC_ID_GIF || \
         m_PCodctx->codec_id == AV_CODEC_ID_ANSI) {
            m_strError = "no video stream !";
            flag = S_FALSE;
            break;
     }

     // Open (initialize) the decoder instance.
     if ((res = avcodec_open2(m_PCodctx,m_PCodec,NULL)) < 0){
         m_strError = "open video decoder failed !";
         flag = S_FALSE;
         break;
     }
}

// Byte size of one BGR24 frame at the decoder's output dimensions.
int numBytes = avpicture_get_size(AV_PIX_FMT_BGR24,
                             m_PCodctx->width,m_PCodctx->height);
m_PBuf = (uint8_t*)av_malloc(numBytes);

if(!m_PBuf){
     m_strError = "alloc buf failed !";
     flag = S_FALSE;
     break;
}

m_PFrameRGB = av_frame_alloc();
m_PFrame = av_frame_alloc();
// Bug fix: the two allocations above were never checked; a failure would
// crash in avpicture_fill / the decode loop. Fail the same way as m_PBuf.
if(!m_PFrameRGB || !m_PFrame){
     m_strError = "alloc buf failed !";
     flag = S_FALSE;
     break;
}
// Point m_PFrameRGB's data/linesize at m_PBuf, laid out as BGR24.
avpicture_fill((AVPicture*)m_PFrameRGB,m_PBuf,AV_PIX_FMT_BGR24,m_PCodctx->width, 
            m_PCodctx->height);
// Create the colorspace converter: decoder pix_fmt -> BGR24, identical
// dimensions. SWS_POINT (nearest neighbour) is the cheapest filter and is
// fine here because no actual scaling takes place.
m_PSwsctx = sws_getContext(m_PCodctx->width,m_PCodctx->height,
                         m_PCodctx->pix_fmt, m_PCodctx->width,m_PCodctx->height,
                         AV_PIX_FMT_BGR24,SWS_POINT,NULL,NULL,NULL);
// Pull the next packet from the input channel.
if ((res = av_read_frame(m_PFmtctx, &m_Avpkt)) < 0)
{
    // Read failure (EOF or broken connection): ask the owner to close.
    PostMessage(m_Hwnd, WM_CLOSE_DEVICE, 0, 0);
    return -2;
}

// Only packets belonging to the video stream are decoded here.
if(m_Avpkt.stream_index == m_VideoStream)
{
    if(!m_PCodctx||!m_PFrame)
    {
         result = -1;
        av_free_packet(&m_Avpkt);
         break;
    }

    // Decode one frame from the compressed packet.
    // Bug fix: the original wrote `(res = decode(...) < 0)`, which — because
    // `<` binds tighter than `=` — stored the boolean 0/1 in `res` and never
    // reported decoder errors. The `< 0` must compare the return value.
    if((res = avcodec_decode_video2(m_PCodctx,m_PFrame,&got_picture_ptr,&m_Avpkt)) < 0)
    {
          m_strError = "video decode failed !";
          result = -1;
    }

    // got_picture_ptr == 1 means a complete frame was produced.
    if(got_picture_ptr == 1)
    {
        EnterCriticalSection(&g_CriticalSection);
        // Convert the decoded frame to BGR24 into m_PFrameRGB/m_PBuf under
        // the lock that also guards the reader (the CvMat conversion code).
        sws_scale(m_PSwsctx,m_PFrame->data, m_PFrame->linesize, 0, m_PFrame->height,
                   m_PFrameRGB->data, m_PFrameRGB->linesize);
        LeaveCriticalSection(&g_CriticalSection);
    }
}
// Return the packet's buffers to FFmpeg.
av_free_packet(&m_Avpkt);

4.3.2 帧数据转为CvMat
EnterCriticalSection(&g_CriticalSection);
// Wrap the BGR24 buffer in an IplImage header — a header only, no pixel
// copy (OpenCV 2 C API).
IplImage* frame = cvCreateImageHeader(cvSize(m_PFrame->width,m_PFrame->height),
                                      IPL_DEPTH_8U, 3);
cvSetData(frame,m_PFrameRGB->data[0],m_PFrameRGB->linesize[0]);/*CV_8UC3*/
// copyData = 0: `img` aliases m_PBuf directly. NOTE(review): `img` is only
// safe to read while g_CriticalSection protects the buffer — confirm no use
// escapes this critical section without a deep copy.
cv::Mat img(frame,0);
// Release only the header; the pixel data still belongs to m_PBuf.
cvReleaseImageHeader(&frame);
LeaveCriticalSection(&g_CriticalSection);

4.4.1 绘制视频数据
// Query the size of the target control's client area.
GetClientRect(m_ShowWnd, &clientRect);
// Build an off-screen (memory) DC to render into, avoiding flicker.
hMemDC = CreateCompatibleDC(hDC);
hBmp = CreateCompatibleBitmap(hDC, clientRect.right, clientRect.bottom);        
// NOTE(review): the objects returned by SelectObject are discarded, so the
// DC's defaults are never restored before DeleteDC — works in practice but
// restoring the original bitmap first is the conventional GDI pattern.
SelectObject(hMemDC,hBmp);
SelectObject(hMemDC, m_hBluePen);
SelectObject(hMemDC, m_hHeadpeakBrush);

SetStretchBltMode(hMemDC, COLORONCOLOR);
cv::Mat drawMat;
// NOTE(review): the frame is resized here AND stretched again by
// StretchDIBits below — presumably (m_lWidth, m_lHeight) matches the client
// rect; otherwise one of the two scalings is redundant. Verify at call site.
cv::resize(tpdestMat, drawMat, cv::Size(m_lWidth, m_lHeight));
// Blit the BGR pixels into the memory DC, stretched to the client area.
StretchDIBits(hMemDC, 
            0, 0, clientRect.right, clientRect.bottom, 
            0, 0, drawMat.cols, drawMat.rows,
            drawMat.data,
            (BITMAPINFO*)m_pInfo,
            DIB_RGB_COLORS, SRCCOPY);
// Copy the finished back buffer onto the control in a single operation.
BitBlt(hDC,0,0,clientRect.right,clientRect.bottom,hMemDC,0,0,SRCCOPY);

// Free the per-frame GDI resources (DC first, then its bitmap).
DeleteDC(hMemDC);
DeleteObject(hBmp);

おすすめ

転載: blog.csdn.net/u012842273/article/details/70318795