ffmpeg如何从内存读取h264音视频流进行解码显示

由于项目组重组,自己有幸开始做音视频编解码方面的研发工作,现将自己近期的工作收获以博客(BLOG)的方式记录起来,方便自己日后查阅和学习。说到H264编解码,不能不提到ffmpeg,据自己查证的资料显示,现大部分软件的H264编解码基本都是使用ffmpeg作为自己的三方库工具,关于ffmpeg有多牛,这里不作赘述。

按照之前查阅的资料,ffmpeg可以解码rtp网络流、从内存读取数据流来解码、读取文件流并解码,本篇幅主要介绍ffmpeg如何从内存读取h264数据流并解码显示,这里只重点关注成功解码h264视频流的关键步骤,关于视频显示与音视频同步部分将在后续内容继续更新!

  先贴上部分代码:

 
  1. CDecoder::CDecoder()

  2. :m_avFmtContext( NULL )

  3. ,m_pFrame( NULL )

  4. ,m_pCodecCtx( NULL )

  5. ,m_bQuit( false )

  6. ,m_openTd( NULL )

  7. ,m_disPlayTd( NULL )

  8. ,m_direcDraw( NULL )

  9. {

  10. av_register_all();

  11. }

  12.  
  13. CDecoder::~CDecoder()

  14. {

  15.  
  16. }

  17.  
  18. bool CDecoder::init( void *hwnd )

  19. {

  20. if ( m_avFmtContext )

  21. {

  22. avformat_close_input( &m_avFmtContext );

  23. m_avFmtContext = NULL;

  24. }

  25.  
  26. if ( !m_avFmtContext )

  27. {

  28. m_avFmtContext = avformat_alloc_context();

  29.  
  30. AVCodec* pCodec = avcodec_find_decoder( AV_CODEC_ID_H264 );

  31.  
  32. if ( pCodec )

  33. {

  34. av_format_set_video_codec( m_avFmtContext, pCodec );

  35.  
  36. m_avFmtContext->video_codec_id = AV_CODEC_ID_H264;//AV_CODEC_ID_H264;

  37. }

  38. }

  39.  
  40. if ( !m_direcDraw )

  41. {

  42. m_direcDraw = new CDirectDraw((HWND)hwnd );

  43. m_direcDraw->dirrectDrawInit((HWND)hwnd);

  44. }

  45.  
  46. m_bQuit = false;

  47. if ( !m_spOpenThread )

  48. {

  49. m_spOpenThread.reset( new boost::thread( boost::bind( &CDecoder::openStream, this ) ) );

  50. }

 
  1. return true;

  2. }

  3.  
  4. void CDecoder::openStream()

  5. {

  6. uint8_t* pBuf = (uint8_t *)av_mallocz( sizeof(uint8_t) * BUF_SIZE );

  7.  
  8. m_avFmtContext->pb = avio_alloc_context( pBuf, BUF_SIZE, 0, this, readRawDataCB, NULL, NULL );

  9.  
  10. if ( !m_avFmtContext->pb )

  11. {

  12. std::cout << "avio_alloc_context error!" << std::endl;

  13.  
  14. return;

  15. }

  16.  
  17. // [ 探测流信息,主要来填充AVInputFormat结构体,为下面的打开流IO作准备 ]

  18. AVInputFormat *pAvInputFmt = NULL;

  19. //AVInputFormat *pAvInputFmt = av_find_input_format("h264");

  20. if ( av_probe_input_buffer( m_avFmtContext->pb, &pAvInputFmt, NULL, NULL, 0, 0 ) < 0 )

  21. {

  22. std::cout << __FUNCTION__ << " : " << __LINE__ << " error! " << std::endl;

  23. //avio_close( m_avFmtContext->pb );

  24. //av_err2str

  25. m_avFmtContext->pb = NULL;

  26. return;

  27. }

  28.  
  29. av_init_packet(&m_avpkt);

  30.  
  31. if ( !m_pFrame )

  32. {

  33. m_pFrame = av_frame_alloc();

  34. }

  35.  
  36. AVFrame *pFrameRGB = NULL;

  37.  
  38. // [ 打开流 ]

  39. if ( avformat_open_input( &m_avFmtContext, NULL, pAvInputFmt, NULL ) < 0 )

  40. {

  41. std::cout << "avformat_open_input error!" << std::endl;

  42.  
  43. return;

  44. }

  45.  
  46. //读取数据

  47. int frameFinished = -1;

  48. int videoStreamNum = -1;

  49. AVCodecContext *pCodecCtx = NULL;

  50. AVCodec *pCodec = NULL;

  51. static struct SwsContext *img_convert_ctx = NULL;

  52. while( true )

  53. {

  54. if ( m_bQuit )

  55. return;

  56.  
  57. int ret = av_read_frame(m_avFmtContext, &m_avpkt);

  58. if ( ret < 0 )

  59. {

  60. boost::thread::sleep( boost::get_system_time() + boost::posix_time::milliseconds( 10) );

  61. continue;

  62. }

  63.  
  64. if ( videoStreamNum == -1 )

  65. {

  66. // [ 查找是否有视频流 ]

  67. for(int i=0; i<(m_avFmtContext->nb_streams); i++)

  68. {

  69. if(m_avFmtContext->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) //找出视频流

  70. {

  71. videoStreamNum=i;

  72. break;

  73. }

  74. }

  75.  
  76. if(videoStreamNum==-1)

  77. return;

  78.  
  79. // [ 找出解码器信息上下文和AVCodec解码器,用于下面打开解码器和解码AvPacket ]

  80.  
  81. pCodecCtx = m_avFmtContext->streams[videoStreamNum]->codec;

  82.  
  83. // 从视频流中找出相应解码器

  84. pCodec=avcodec_find_decoder(m_avFmtContext->streams[videoStreamNum]->codec->codec_id);

  85. if(pCodec==NULL)

  86. {

  87. fprintf(stderr, "Unsupported codec!\n");

  88. return;

  89. }

  90.  
  91. // [ 打开流信息中查找到流解码器 ]

  92. if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)

  93. return; // Could not open codec

  94.  
  95. pCodecCtx->width = 1920;

  96. pCodecCtx->height = 1080;

  97. pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;

  98.  
  99. if ( !pFrameRGB )

  100. {

  101. pFrameRGB = av_frame_alloc();

  102.  
  103. if ( !pFrameRGB )

  104. return;

  105.  
  106. pFrameRGB->width = pCodecCtx->width;

  107. pFrameRGB->height = pCodecCtx->height;

  108. pFrameRGB->format = PIX_FMT_YUV420P;

  109.  
  110. if ( av_image_alloc( pFrameRGB->data, pFrameRGB->linesize,

  111. pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, 1 ) < 0 )

  112. {

  113. av_frame_free( &pFrameRGB );

  114.  
  115. pFrameRGB = NULL;

  116.  
  117. return;

  118. }

  119. }

  120.  
  121. img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,

  122. pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,

  123. PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

  124. }

  125.  
  126. if(m_avpkt.stream_index==videoStreamNum)

  127. {

  128. // 解码

  129. avcodec_decode_video2( pCodecCtx, m_pFrame, &frameFinished, &m_avpkt );

  130.  
  131. if ( frameFinished > 0 )

  132. {

  133. static uint8_t *p = NULL;

  134.  
  135. p = m_pFrame->data[1];

  136. m_pFrame->data[1] = m_pFrame->data[2];

  137. m_pFrame->data[2] = p;

  138.  
  139. sws_scale(img_convert_ctx, m_pFrame->data, m_pFrame->linesize,

  140. 0, pCodecCtx->height,pFrameRGB->data,pFrameRGB->linesize);

  141.  
  142. sPictureSize sPicSize;

  143. sPicSize.nHeight = pCodecCtx->height;

  144. sPicSize.nWidth = pCodecCtx->width;

  145. m_direcDraw->dirrectDrawInputData(pFrameRGB->data[0], sPicSize);

  146. m_direcDraw->directDrawUpDateData();

  147.  
  148. //SaveAsBMP( pFrameRGB, pCodecCtx->width, pCodecCtx->height, 1, 1 );

  149. }

  150. }

  151.  
  152. av_free_packet(&m_avpkt);

  153. }

  154.  

猜你喜欢

转载自blog.csdn.net/special00/article/details/82533794