The RGB image produced by sws_scale is vertically inverted (upside-down)

 AVFrame* pFrame;                   // Frame 
AVCodecContext* pContext;          // Codec Context
int nUsedBytes = avcodec_decode_video(pContext, pFrame,  &nFrame, pSrcBuffer, nSrcLength);
if(nUsedBytes > 0)
{
    AVFrame  out_pic;
    SwsContext* img_convert_ctx = sws_getContext(pContext->width, pContext->height, pContext->pix_fmt, nDestW, nDestH,(PixelFormat)PIX_FMT_BGR24,SWS_BICUBIC, NULL, NULL, NULL);
    if(img_convert_ctx != NULL)
    {
        if(avpicture_alloc((AVPicture *)&out_pic, PIX_FMT_RGB24, nDestW, nDestH)>=0)
        {
            pFrame->data[0] = pFrame->data[0]+pFrame->linesize[0]*(pContext->height-1);
            pFrame->data[1] = pFrame->data[1]+pFrame->linesize[0]*pContext->height/4-1;
    pFrame->data[2] = pFrame->data[2]+pFrame->linesize[0]*pContext->height/4-1;
            pFrame->linesize[0] *= -1;
            pFrame->linesize[1] *= -1;
    pFrame->linesize[2] *= -1;
    sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pContext->height, out_pic.data, out_pic.linesize);
            avpicture_free((AVPicture *)&out_pic);
        }
        sws_freeContext(img_convert_ctx);
    }
} 
/**
 * Decode one compressed video packet and convert/scale the decoded picture to
 * a vertically-flipped, packed RGB24 image of nDestW x nDestH in out_pic.
 *
 * @param pSrcBuffer  compressed input data
 * @param nSrcLength  length of pSrcBuffer in bytes
 * @param nDestW      destination width in pixels
 * @param nDestH      destination height in pixels
 * @param out_pic     receives the RGB24 picture; the buffer is allocated here
 *                    with avpicture_alloc and OWNED BY THE CALLER, who must
 *                    release it with avpicture_free when done.
 */
static void decode_to_RGB24(const uint8_t * pSrcBuffer, int nSrcLength, int nDestW, int nDestH, AVFrame& out_pic)
{
	AVFrame* pFrame;                   // decoded frame
	AVCodecContext* pContext;          // codec context
	// NOTE(review): pFrame and pContext are uninitialized here, exactly as in
	// the original — they must come from an opened decoder; dereferencing them
	// as-is is undefined behavior. Confirm against the caller's setup.
	int nFrame = 0;
	int nUsedBytes = avcodec_decode_video(pContext, pFrame, &nFrame, pSrcBuffer, nSrcLength);
	if (nUsedBytes > 0)
	{
		// Destination format must match the avpicture_alloc call below.
		// The original requested PIX_FMT_BGR24 here but allocated RGB24,
		// yielding swapped R/B channels; the function name says RGB24.
		SwsContext* img_convert_ctx = sws_getContext(pContext->width, pContext->height, pContext->pix_fmt,
			nDestW, nDestH, (PixelFormat)PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

		if (img_convert_ctx != NULL)
		{
			if (avpicture_alloc((AVPicture *)&out_pic, PIX_FMT_RGB24, nDestW, nDestH) >= 0)
			{
				// Flip vertically: point each plane at the start of its last
				// row and negate the stride so sws_scale reads bottom-up.
				pFrame->data[0] += pFrame->linesize[0] * (pContext->height - 1);
				// Chroma planes (assumes planar 4:2:0 — TODO confirm pix_fmt)
				// have height/2 rows and stride linesize[1]/linesize[2]; the
				// original linesize[0]*height/4 - 1 was (linesize[1]-1) bytes
				// short of the last chroma row.
				pFrame->data[1] += pFrame->linesize[1] * (pContext->height / 2 - 1);
				pFrame->data[2] += pFrame->linesize[2] * (pContext->height / 2 - 1);
				pFrame->linesize[0] *= -1;
				pFrame->linesize[1] *= -1;
				pFrame->linesize[2] *= -1;
				sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
					pContext->height, out_pic.data, out_pic.linesize);
				// Do NOT free out_pic here: it is the caller's output buffer.
				// (The original called avpicture_free at this point, returning
				// a frame whose data pointers referenced freed memory.)
			}
			sws_freeContext(img_convert_ctx);
		}
	}
}


 

The data and linesize fields of AVFrame:

Planar YUV (4:2:0): linesize[0] = width plus alignment padding (e.g. 16+16), and linesize[1] = linesize[2] = linesize[0] / 2.

        data[0], data[1], data[2] point to the Y, U, and V planes respectively.

Packed RGB: linesize[0] = width * pixel_size; data[0] holds the packed RGB data.
 


You may also like

Origin blog.csdn.net/panpanloveruth/article/details/6904921