// Notes on converting an AVFrame's YUV data to RGB, and converting an AVFrame to a cv::Mat.

// Convert the most recently decoded frame (member `yuv`) to packed BGRA and
// copy the pixels into the caller-supplied buffer.
//
// outSpace  - destination buffer; must hold at least outWidth * outHeight * 4 bytes
// outWidth  - requested output width in pixels
// outHeight - requested output height in pixels
//
// Returns true on success. On failure writes a message into `errorbuf`
// and returns false. Thread-safe via the member `mutex`.
//
// NOTE(review): the blog paste had no return type; `bool` restored to match
// the `return true/false` statements — confirm against the class declaration.
bool yuvToRgb(char *outSpace, int outWidth, int outHeight)
{
	mutex.lock();
	// Guard against a bad destination: sws_scale below writes outWidth*4-byte
	// rows through outSpace with no bounds information of its own.
	if (!outSpace || outWidth <= 0 || outHeight <= 0)
	{
		strcpy(errorbuf, "invalid output buffer or size!");
		mutex.unlock();
		return false;
	}
	if (!iCtx || !yuv)
	{
		strcpy(errorbuf, "stream not open or yuv not decoded!");
		mutex.unlock();
		return false;
	}
	// Source format comes from the demuxed stream's codec context.
	// NOTE(review): AVStream::codec is deprecated in modern FFmpeg; newer code
	// should keep its own AVCodecContext via avcodec_parameters_to_context.
	AVCodecContext *videoCtx = iCtx->streams[this->videoStream]->codec;
	// Reuse (or lazily create) a scaler converting the decoded pixel format
	// to BGRA at the requested output size.
	vCtx = sws_getCachedContext(vCtx,
		videoCtx->width, videoCtx->height, videoCtx->pix_fmt,
		outWidth, outHeight, AV_PIX_FMT_BGRA,
		SWS_BICUBIC, NULL, NULL, NULL
	);
	if (!vCtx)
	{
		strcpy(errorbuf, "sws getCachedContext failed!");
		mutex.unlock();
		return false;
	}
	// Destination plane pointers and strides: BGRA is a single packed plane,
	// 4 bytes per pixel, so only index 0 is used.
	uint8_t *data[AV_NUM_DATA_POINTERS] = { 0 };
	data[0] = (uint8_t *)outSpace;
	int linesize[AV_NUM_DATA_POINTERS] = { 0 };
	linesize[0] = outWidth * 4;
	// sws_scale returns the height of the converted image; > 0 means success.
	int h = sws_scale(vCtx, yuv->data, yuv->linesize, 0, videoCtx->height,
		data, linesize);
	if (h <= 0)
	{
		strcpy(errorbuf, "sws scale failed!");
		mutex.unlock();
		return false;
	}
	mutex.unlock();
	return true;
}
// Convert a decoded AVFrame (any pixel format) into a BGR24 cv::Mat copy.
//
// frame - decoded video frame; frame->format, width and height must be valid.
//
// Returns a newly allocated CV_8UC3 Mat (BGR byte order) owning its pixels,
// or an empty Mat if the swscale converter could not be created.
Mat avframe_to_cvmat(AVFrame *frame)
{
	int w = frame->width, h = frame->height;
	cv::Mat cvmat(h, w, CV_8UC3);

	// Wrap the Mat's pixel buffer as the destination "picture" so sws_scale
	// writes straight into the Mat — no intermediate copy.
	AVFrame dst;
	memset(&dst, 0, sizeof(dst));
	// NOTE(review): avpicture_fill is deprecated; the modern replacement is
	// av_image_fill_arrays (libavutil/imgutils.h) — same plane/stride setup.
	avpicture_fill((AVPicture *)&dst, (uint8_t *)cvmat.data, AV_PIX_FMT_BGR24, w, h);

	AVPixelFormat src_pixfmt = (AVPixelFormat)frame->format;
	AVPixelFormat dst_pixfmt = AV_PIX_FMT_BGR24;

	struct SwsContext *img_convert_ctx =
		sws_getContext(w, h, src_pixfmt, w, h, dst_pixfmt,
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
	// BUG FIX: the original never checked this result; sws_getContext returns
	// NULL for unsupported formats/sizes and sws_scale would then crash.
	if (!img_convert_ctx)
	{
		return cv::Mat();
	}
	sws_scale(img_convert_ctx, frame->data, frame->linesize, 0, h,
		dst.data, dst.linesize);
	sws_freeContext(img_convert_ctx);

	return cvmat;
}

// Source: adapted from blog.csdn.net/qq_41776453/article/details/114995913