FFMPEG: Capturing Camera Frames to YUV + Reading the Camera, Encoding and Muxing to FLV

The simplest FFmpeg AVDevice example (reading a camera): libavdevice adds support for capture/playback devices as inputs and outputs.

FFmpeg: getting DirectShow device data (camera, screen recording)
① First install FFmpeg on Windows: put a static build in a directory and add that directory to the PATH environment variable.
  Check the version: ffmpeg -version
② List the available devices: ffmpeg -list_devices true -f dshow -i dummy
Problem ①: no devices are listed. This usually means screen-capture-recorder and virtual-audio-capturer (third-party DirectShow filters for screen recording and audio loopback) are not installed.
Problem ②: device names are displayed as garbled Chinese characters.
//The device names in this list matter: they are exactly what you pass as -f dshow -i video="{device name}".
A typical listing looks like this (note the garbled microphone name; that is Problem ②):
[dshow @ 0388f5e0] DirectShow video devices
[dshow @ 0388f5e0]  "Integrated Camera"
[dshow @ 0388f5e0]  "screen-capture-recorder"
[dshow @ 0388f5e0] DirectShow audio devices
[dshow @ 0388f5e0]  "鍐呰楹﹀厠椋?(Conexant20672 SmartAudi"
[dshow @ 0388f5e0]  "virtual-audio-capturer"
//screen-capture-recorder and virtual-audio-capturer must be installed separately; "Integrated Camera" is the built-in webcam on, e.g., a Lenovo laptop.
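
Two follow-up commands can help here (hedged: the device names are machine-specific, and console behavior varies by Windows version). ffmpeg prints device names as UTF-8, so switching the console code page to UTF-8 before listing often fixes the garbled names; and video plus audio can be captured together by repeating -f dshow:

chcp 65001
ffmpeg -f dshow -i video="Integrated Camera" -f dshow -i audio="virtual-audio-capturer" -vcodec libx264 -acodec aac mycamera.flv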

2. Capturing camera data (saving to a local file or sending a live stream)

2.1. Encoding to H.264 and saving to a local file

The following command reads data from the camera, encodes it to H.264, and saves the result as mycamera.mkv:

ffmpeg -f dshow -i video="Integrated Camera" -vcodec libx264 mycamera.mkv
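
The device can also be previewed live before recording (an illustrative command; substitute your own device name):

ffplay -f dshow -i video="Integrated Camera"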

The simplest libavdevice-based program reads one frame of camera data as YUV and saves it to output.yuv:

① Plain file input:
  AVFormatContext *pFormatCtx = avformat_alloc_context();
  avformat_open_input(&pFormatCtx, "test.h265", NULL, NULL);
② Device input:
  AVFormatContext *pFormatCtx = avformat_alloc_context();
  AVInputFormat *ifmt = av_find_input_format("vfwcap"); //the device/format name (e.g. vfwcap, video4linux2) selects the input format
  avformat_open_input(&pFormatCtx, "0", ifmt, NULL);    //the URL "0" means open device #0 (on my machine, the camera)

 On Windows, besides the vfw device, DirectShow can also be used as the input device:
  AVFormatContext *pFormatCtx = avformat_alloc_context();
  AVInputFormat *ifmt = av_find_input_format("dshow");
  avformat_open_input(&pFormatCtx, "video=Integrated Camera", ifmt, NULL);
  
  
avformat_open_input can also be called without an explicit AVInputFormat: if that argument is NULL, FFmpeg probes the format from the url passed as the second argument.

YUV420P (a planar format) is stored in FFmpeg in the data[] array of struct AVFrame:
data[0]: the Y plane
data[1]: the U plane
data[2]: the V plane -> so data[0], data[1], data[2] must be written out in that order to produce planar YUV
The linesize[] array holds the stride (bytes per row) of the corresponding plane:
linesize[0]: stride of the Y plane
linesize[1]: stride of the U plane
linesize[2]: stride of the V plane
Note: linesize[0] is not necessarily equal to the picture width. With a 1366*768 picture I got linesize[0] = 1376, presumably because of memory alignment.
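
Because of that padding, the safe way to dump a frame is plane by plane, row by row, copying only width bytes per row. A minimal sketch (assuming a filled-in YUV420P AVFrame; write_yuv420p is a hypothetical helper, not an FFmpeg API):

static void write_yuv420p(FILE *fp, const AVFrame *f)
{
	int y;
	for (y = 0; y < f->height; y++)        /* Y plane: width bytes per row */
		fwrite(f->data[0] + y * f->linesize[0], 1, f->width, fp);
	for (y = 0; y < f->height / 2; y++)    /* U plane: half width, half height */
		fwrite(f->data[1] + y * f->linesize[1], 1, f->width / 2, fp);
	for (y = 0; y < f->height / 2; y++)    /* V plane */
		fwrite(f->data[2] + y * f->linesize[2], 1, f->width / 2, fp);
}

(The programs below get away with contiguous fwrite calls because avpicture_fill packs the buffer with alignment 1, so each linesize equals the plane width.)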

Flow: read camera -> packet -> decode -> source frame -> sws_scale -> pFrameYUV -> write data[0], data[1], data[2] in order to the output file. (The result is a raw YUV file: it carries nothing but pixel data, so the width, height and pixel format must be supplied separately on playback.)
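A raw file like this can be viewed by telling the player everything the file itself cannot say (640x480 is an assumption; substitute the camera's actual resolution):

ffplay -f rawvideo -pixel_format yuv420p -video_size 640x480 output.yuv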
#include "libavdevice/avdevice.h"
int main(int argc, char* argv[])  
{  
//1. prepare: input device, decoder, frames, swscale context
	AVFormatContext *pFormatCtx;  
	int             i, videoindex;  
	AVCodecContext  *pCodecCtx;  
	AVCodec         *pCodec;  
	
	av_register_all();  
	avformat_network_init();    
	avdevice_register_all();  
	
	pFormatCtx = avformat_alloc_context();  
	AVInputFormat *ifmt=av_find_input_format("video4linux2");  
	if(avformat_open_input(&pFormatCtx,"/dev/video0",ifmt,NULL)!=0)		{printf("Couldn't open input stream./dev/video0\n");  return -1;  }  
   
	if(avformat_find_stream_info(pFormatCtx,NULL)<0)  {  printf("Couldn't find stream information.\n");  return -1;  }  
	
	videoindex=-1;  
	for(i=0; i<pFormatCtx->nb_streams; i++)   
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)  {  videoindex=i;  break;  }  
	if(videoindex==-1)  {  printf("Couldn't find a video stream.\n");  return -1;  }  
	
	pCodecCtx=pFormatCtx->streams[videoindex]->codec;  
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);  //if(pCodec==NULL)  {  printf("Codec not found.\n");  return -1;  }  
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)  {  printf("Could not open codec.\n");  return -1;  }  
	
	AVFrame *pFrame,*pFrameYUV;  
	pFrame=av_frame_alloc();  
	pFrameYUV=av_frame_alloc();  
	unsigned char *out_buffer=(unsigned char *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));  
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);  //point pFrameYUV's data pointers into out_buffer
	//buffer size is computed from the camera's width/height and the target pixel format YUV420P
	//printf("camera width=%d height=%d \n",pCodecCtx->width, pCodecCtx->height);  
	
			
	int ret, got_picture;  
	AVPacket *packet=(AVPacket *)av_malloc(sizeof(AVPacket));  
   
#if OUTPUT_YUV420P   
	FILE *fp_yuv=fopen("output.yuv","wb+");   //open the output file
#endif    
   
	struct SwsContext *img_convert_ctx;  
	img_convert_ctx = sws_getContext(	pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
										pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);   



//2. read -- decode -- convert (the three YUV planes land in pFrameYUV->data[0..2]) -- append them to the .yuv file
	if(av_read_frame(pFormatCtx, packet)>=0){  
		if(packet->stream_index==videoindex){  
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet); 
			if(ret < 0){printf("Decode Error.\n");  return -1;  }  
			if(got_picture){  
				sws_scale(	img_convert_ctx, 
							(const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
							pFrameYUV->data, pFrameYUV->linesize);  

#if OUTPUT_YUV420P    
				int y_size=pCodecCtx->width*pCodecCtx->height;      
				fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y     
				fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U    
				fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V    
#endif    
			}  
		}  
		av_free_packet(packet);    
	}  
	
//3. clean up
	sws_freeContext(img_convert_ctx);  
#if OUTPUT_YUV420P   
	fclose(fp_yuv);  
#endif   
	av_free(out_buffer);  
	av_free(pFrameYUV);  
	avcodec_close(pCodecCtx);  
	avformat_close_input(&pFormatCtx);  
   
	return 0;  
}  
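
No build line was given for this listing; a command along the lines of the one used for the later examples should work (the include/lib paths are illustrative and depend on where FFmpeg is installed):

gcc camera_yuv.c -o camera_yuv.out -I/root/ffmpeg/include/ -L/root/ffmpeg/lib -lavcodec -lavformat -lavdevice -lavutil -lswscale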

Below is a more rigorous version:

//FFmpeg: grab one camera frame, save it as 11yuv420.yuv (after conversion to YUV420P), and also encode it to a JPEG
//build:	gcc usb_baocun_yuv.c -o video-frame.out -I/root/ffmpeg/include/ -L/root/ffmpeg/lib -lavcodec -lavformat -lavdevice -lavutil -lswscale -lSDL
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavdevice/avdevice.h"
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>

char* input_name= "video4linux2";		//not actually used below
char* file_name = "/dev/video0";		//the capture device is opened like a file
char* out_file = "11yuv420.yuv";		//a YUV viewer must be told the matching size/format, or the data is misinterpreted

static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;

typedef struct StreamContext {//one decoder/encoder AVCodecContext pair per stream
	AVCodecContext *dec_ctx;
	AVCodecContext *enc_ctx;
} StreamContext;

static StreamContext *stream_ctx = NULL;
const char* jpeg_file = "cuc_view_encode.jpg";
int g_stream_index = -1;			//index of the video stream

void release_frame(AVFrame *frame)
{
	if(frame){
		av_frame_unref(frame);
		av_frame_free(&frame);
	}
}

void release_packet(AVPacket *packet)
{
	if(packet){
		av_packet_unref(packet);
		av_packet_free(&packet);
	}
}

//open the input file and set up stream_ctx[i].dec_ctx for each of its streams
int open_input_file()
{
	int i = 0;
	int ret = -1;
	int videoindex = -1;
	ifmt_ctx = NULL;//one stream_ctx[i] per stream inside ifmt_ctx

	if((ret = avformat_open_input (&ifmt_ctx, file_name, NULL, NULL)) < 0) {av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");return ret;}
	//av_dump_format(ifmt_ctx,0,file_name,0);
	if((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");return ret;}
	stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));//one contiguous array of per-stream contexts
	if (!stream_ctx){return AVERROR(ENOMEM);}
	
	//fill in stream_ctx[i].dec_ctx
	for (i = 0; i < ifmt_ctx->nb_streams; i++)
	{
		AVStream *stream = ifmt_ctx->streams[i];
		AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);//if (!dec) {av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);return AVERROR_DECODER_NOT_FOUND;}
		
		AVCodecContext *codec_ctx = avcodec_alloc_context3(dec);//if (!codec_ctx) {av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);return AVERROR(ENOMEM);}
		
		ret = avcodec_parameters_to_context(codec_ctx,stream->codecpar);//if (ret < 0) {av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context for stream #%u\n", i);return ret;}
		/* Reencode video & audio and remux subtitles etc. */
		if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)	codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
			ret = avcodec_open2(codec_ctx, dec, NULL);//if (ret < 0) {av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);return ret;}
		}
		stream_ctx[i].dec_ctx = codec_ctx;	//store into element i, not always element 0
	}
	
	videoindex= -1;
	for(i=0; i<ifmt_ctx->nb_streams; i++)
		if(ifmt_ctx->streams[i]->codecpar->codec_type==AVMEDIA_TYPE_VIDEO){videoindex=i;break;}
	if(videoindex==-1)	{printf("Didn't find a video stream.\n");return -1;}
	g_stream_index = videoindex;

	printf("video index is %d, nb_stream is %d\n", videoindex, ifmt_ctx->nb_streams);
	printf("picture width   =  %d \n", stream_ctx[videoindex].dec_ctx->width);
	printf("picture height  =  %d \n", stream_ctx[videoindex].dec_ctx->height);
	printf("Pixel Format  =  %d \n\n\n\n", stream_ctx[videoindex].dec_ctx->pix_fmt);

	return ret;
}

//
int open_output_file()
{
	AVStream *out_stream;
	AVStream *in_stream;
	AVCodecContext *dec_ctx, *enc_ctx;
	AVCodec *encoder;
	unsigned int i;
	int ret = -1;

	ofmt_ctx = NULL;
	avformat_alloc_output_context2(&ofmt_ctx,NULL,NULL,jpeg_file);//if (!ofmt_ctx) {av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");return AVERROR_UNKNOWN;}
	//printf("codec is is %d---%d----",ofmt_ctx->video_codec_id,ofmt_ctx->oformat->video_codec,AV_CODEC_ID_MJPEG);
	
	for (i = 0; i < ifmt_ctx->nb_streams; i++) 
	{
		out_stream = avformat_new_stream(ofmt_ctx, NULL);//if (!out_stream) {av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");return AVERROR_UNKNOWN;}

		in_stream = ifmt_ctx->streams[i];
		dec_ctx = stream_ctx[i].dec_ctx;

		if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			encoder = avcodec_find_encoder( ofmt_ctx->oformat->video_codec);	//note: picks the muxer's default *video* codec even for audio streams; harmless here since the camera input has a single video stream
			//if (!encoder)	{av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");return AVERROR_INVALIDDATA;}
			enc_ctx = avcodec_alloc_context3(encoder);
			//if(!enc_ctx){av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");return AVERROR(ENOMEM);}

			if(dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
			{
				enc_ctx->height = dec_ctx->height;						//width/height/sample aspect ratio are copied from the decoder context
				enc_ctx->width = dec_ctx->width;
				enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
				
				
				/* take first format from list of supported formats */
				//in enum AVPixelFormat: AV_PIX_FMT_NONE = -1, AV_PIX_FMT_YUV420P = 0, AV_PIX_FMT_YUYV422 = 1
				if (encoder->pix_fmts)
					enc_ctx->pix_fmt = encoder->pix_fmts[0];//use the first pixel format the encoder advertises
				else
					enc_ctx->pix_fmt = dec_ctx->pix_fmt;//encoder advertises nothing: keep the decoder's format
				
				
				/* video time_base can be set to whatever is handy and supported by encoder */
				enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
			}
			else 
			{
				enc_ctx->sample_rate = dec_ctx->sample_rate;
				enc_ctx->channel_layout = dec_ctx->channel_layout;
				enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
				/* take first format from list of supported formats */
				enc_ctx->sample_fmt=encoder->sample_fmts[0];
				enc_ctx->time_base=(AVRational){1,enc_ctx->sample_rate};
			}

			if(ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)	enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;	//must be set before avcodec_open2()
			/* Third parameter can be used to pass settings to the encoder */
			ret = avcodec_open2(enc_ctx,encoder,NULL);//if(ret<0) {av_log(NULL,AV_LOG_ERROR,"Cannot open video encoder for stream #%u\n",i);return ret;}
			ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);/*(dst, const src)*/		//if(ret<0){av_log(NULL,AV_LOG_ERROR,"Failed to copy encoder parameters to output stream #%u\n",i);return ret;}

			out_stream->time_base = enc_ctx->time_base;
			stream_ctx[i].enc_ctx = enc_ctx;
		}
		/*
			else if (dec_ctx->codec_type==AVMEDIA_TYPE_UNKNOWN)
			{av_log(NULL,AV_LOG_FATAL,"Elementary stream #%d is of unknown type, cannot proceed\n",i);return AVERROR_INVALIDDATA;}
			else
			{
				// if this stream must be remuxed 
				ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
				if(ret<0){av_log(NULL,AV_LOG_ERROR,"Copying parameters for stream #%u failed\n",i);return ret;}
				out_stream->time_base = in_stream->time_base;
			}
		*/
	}
	//av_dump_format(ofmt_ctx,0,jpeg_file,1);

	if(!(ofmt_ctx->oformat->flags&AVFMT_NOFILE))
		ret = avio_open(&ofmt_ctx->pb, jpeg_file, AVIO_FLAG_WRITE);//if(ret<0) {av_log(NULL,AV_LOG_ERROR,"Could not open output file '%s'",jpeg_file);return ret;}
	
	return ret;
}

int save_to_jpeg(const AVFrame * frame)
{
	AVPacket *packet;
	int ret;
	unsigned int i;
	AVCodecContext * enc_ctx = stream_ctx[g_stream_index].enc_ctx;

	/* init muxer, write output file header */
	ret = avformat_write_header(ofmt_ctx,NULL);
	if(ret<0) {av_log(NULL,AV_LOG_ERROR,"Error occurred when opening output file\n");return ret;}

	packet = av_packet_alloc();	//av_packet_alloc() already initializes the packet, so no av_init_packet() is needed

	ret = avcodec_send_frame(enc_ctx, frame);//if (ret < 0) {fprintf(stderr, "Error sending a frame for encoding\n");exit(1);}

	ret = avcodec_receive_packet(enc_ctx, packet);//if(ret){printf("receive packet is error\n");return -1;}
	packet->stream_index = g_stream_index;
	av_packet_rescale_ts(packet, enc_ctx->time_base,ofmt_ctx->streams[g_stream_index]->time_base);
	printf("stream index is %d\n", packet->stream_index);
	printf("write frame\n");
	ret = av_write_frame(ofmt_ctx, packet);

	//Write Trailer
	av_write_trailer(ofmt_ctx);
	release_packet(packet);
	printf("Encode Successful.\n");
	return 0;
}

int captureOneFrame(void)
{
	struct SwsContext *sws_ctx;
	AVPacket *packet;
	AVFrame *frame;
	AVFrame *yuvFrame;
	FILE *fp;
	int i;
	int ret = 0;
	enum AVPixelFormat dst_pix_fmt = AV_PIX_FMT_YUV420P;

	uint8_t *dst_data[4];
	int dst_linesize[4];
	int dst_bufsize;
	uint8_t *dst_buffer= NULL;
	AVCodecContext * dec_ctx = stream_ctx[g_stream_index].dec_ctx;
	int dst_bpp=av_get_bits_per_pixel(av_pix_fmt_desc_get(dst_pix_fmt));

	if((fp=fopen(out_file,"wb")) == NULL){printf("open frame data file failed\n");return -1;}	//fopen returns NULL on failure

	sws_ctx = sws_getContext(	dec_ctx->width,dec_ctx->height,dec_ctx->pix_fmt,
								dec_ctx->width,dec_ctx->height,dst_pix_fmt,
								SWS_BILINEAR,NULL,NULL,NULL);

	dst_bufsize = av_image_alloc(dst_data,dst_linesize,dec_ctx->width,dec_ctx->height,dst_pix_fmt,1);

	packet= av_packet_alloc();
	av_read_frame(ifmt_ctx, packet);

	frame = av_frame_alloc();	//yuvFrame is produced later by av_frame_clone(); allocating it here too would leak

	if ((ret=avcodec_send_packet(dec_ctx,packet)) < 0) {fprintf(stderr, "Error sending a packet for decoding\n");exit(1);}
	
	ret = avcodec_receive_frame(dec_ctx, frame);
	if(ret)	{printf("decoding is error\n");return -1;}


	yuvFrame = av_frame_clone(frame);	//the clone carries over width/height/pts
	yuvFrame->format = dst_pix_fmt;		//its data pointers are retargeted at the converted buffer below
//    av_image_fill_arrays(src_data, src_linesize, packet->data,dec_ctx->pix_fmt , dec_ctx->width, dec_ctx->height ,1 ); 
	sws_scale(sws_ctx,(const uint8_t * const *)frame->data,frame->linesize,0,dec_ctx->height,dst_data,dst_linesize);
//frame->data -> sws_scale -> dst_data -> copy -> dst_buffer
	dst_buffer = av_malloc(dec_ctx->width*dec_ctx->height*dst_bpp/8);
	av_image_copy_to_buffer(dst_buffer,dec_ctx->width*dec_ctx->height*dst_bpp/8,(const uint8_t * const *)dst_data,(const int *)dst_linesize,dst_pix_fmt,dec_ctx->width,dec_ctx->height,1);

	av_image_fill_arrays(yuvFrame->data, yuvFrame->linesize, dst_buffer, yuvFrame->format, yuvFrame->width, yuvFrame->height ,1 );//point yuvFrame at dst_buffer

	fwrite(dst_buffer,1,dec_ctx->width*dec_ctx->height*dst_bpp/8,fp);

	fclose(fp);
	sws_freeContext(sws_ctx);
	save_to_jpeg(yuvFrame);
	if(dst_buffer){av_free(dst_buffer);}
	release_frame(frame);
	release_frame(yuvFrame);
	release_packet(packet);
	av_freep(&dst_data[0]);
	return ret;
}

void close_stream()
{
	int i = 0;
	if(ifmt_ctx)
		for(i = 0; i < ifmt_ctx->nb_streams; i++){avcodec_free_context(&stream_ctx[i].dec_ctx);}	//avcodec_free_context() also closes the codec

	if(ofmt_ctx)
		for(i = 0;i<ifmt_ctx->nb_streams;i++){avcodec_free_context(&stream_ctx[i].enc_ctx);}	//safe even for streams that never got an encoder (freeing NULL is a no-op)

	if(stream_ctx){av_free(stream_ctx);}
	if(ofmt_ctx && !(ofmt_ctx->oformat->flags&AVFMT_NOFILE)){avio_closep(&ofmt_ctx->pb);}
	if(ofmt_ctx){avformat_free_context(ofmt_ctx);}
	if(ifmt_ctx){avformat_close_input(&ifmt_ctx);}	//avformat_close_input() already frees the context
}

int main(void)
{
	//av_log_set_level(AV_LOG_ERROR);
	avdevice_register_all();
	av_register_all();
	if(open_input_file() <0){printf("Failed to open the input file\n"); return -1;}
	if(open_output_file() <0){printf("Failed to open the output file\n"); return -1;}
	captureOneFrame();
	close_stream();
	return 0;
}

The code below is adapted from qinyi_style's "FFMPEG audio/video sync: read the camera, encode, mux and save".

Record for 10 seconds: a time-limited variant of the single-frame code above.
  In get_video_frame1, av_compare_ts compares ① the pts of the next frame to be read (ost->next_pts) against ② the preset duration; once the preset time is reached, recording stops (see the worked example below).
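
As a concrete check of that cut-off, using the 25 fps frame rate and 10 s duration defined in the code below (av_compare_ts returns -1, 0 or 1 like a comparator):

av_compare_ts(249, (AVRational){1, 25}, 10, (AVRational){1, 1});  /* 9.96 s  < 10 s -> -1, keep recording */
av_compare_ts(250, (AVRational){1, 25}, 10, (AVRational){1, 1});  /* 10.00 s >= 10 s ->  0, stop          */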

//  gcc video2flv.c -o video2flv.out -I/root/ffmpeg/include/ -L/root/ffmpeg/lib -lavcodec -lavformat -lavdevice -lavutil -lswscale -lSDL -lswresample
//  ./video2flv.out
/*
	修改自qinyi_style   FFMPEG音视频同步-读取摄像头并编码封装保存
		https://blog.csdn.net/quange_style/article/details/90082391
*/
#include <stdlib.h>  
#include <stdio.h>  
#include <string.h>  
#include <math.h>  
#include <unistd.h>		/* usleep() */
  
#include <libavutil/avassert.h>  
#include <libavutil/channel_layout.h>  
#include <libavutil/opt.h>  
#include <libavutil/mathematics.h>  
#include <libavutil/timestamp.h>  
#include <libavformat/avformat.h>  
#include <libswscale/swscale.h>  
#include <libswresample/swresample.h>  
  
#define STREAM_DURATION   10.0  /* preset total duration of the recording, in seconds */  
#define STREAM_FRAME_RATE 25 /* 25 images/s */  
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */  
  

  
typedef struct OutputStream {  
    AVStream *st;  
    AVCodecContext *enc;  
  
    int64_t next_pts;  
    int samples_count;  
  
    AVFrame *frame;  
    AVFrame *tmp_frame;  
  
    float t, tincr, tincr2;  
  
    struct SwsContext *sws_ctx;  
    struct SwrContext *swr_ctx;  
} OutputStream;  //trimmed-down version of the OutputStream struct in FFmpeg's muxing.c example
  
  
typedef struct IntputDev {  
	AVCodecContext  *pCodecCtx;  		//pCodecCtx = v_ifmtCtx->streams[videoindex]->codec;
	AVCodec         *pCodec;  			//pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	AVFormatContext *v_ifmtCtx;  		//avformat_alloc_context() + avformat_open_input(&v_ifmtCtx,"/dev/video0",ifmt,NULL)
	int  videoindex;  
	struct SwsContext *img_convert_ctx;  
	AVPacket *in_packet;  		//av_malloc'ed; filled by av_read_frame()
	AVFrame *pFrame,*pFrameYUV;  //av_frame_alloc(); decode into pFrame -> convert -> pFrameYUV (avpicture_fill'ed onto out_buffer)
}IntputDev;  
  
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)  //logging helper: print packet timing info (cf. muxing.c)
{  
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;  
  
	printf("pts:%s pts_time:%s duration:%s\n",av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),av_ts2str(pkt->duration));
	/*
    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",  
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),  
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),  
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),  
           pkt->stream_index);  //pts and dts both come from the pre-encode frame.pts; duration was never set, so it is still 0
	*/
}  
  
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)  //cf. muxing.c
//before the packet is written, pkt->pts must be rescaled and pkt->stream_index set
{  
    /* rescale output packet timestamp values from codec (frame-number) to stream timebase */  
    av_packet_rescale_ts(pkt, *time_base, st->time_base);  
			//e.g. in one run av_packet_rescale_ts mapped pkt->pts from frame number 33 to timestamp 118800
    pkt->stream_index = st->index;  
  
    /* Write the compressed frame to the media file. */  
    log_packet(fmt_ctx, pkt);  
    return av_interleaved_write_frame(fmt_ctx, pkt);  
}  
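
Concretely, rescaling multiplies by the ratio of the two time bases: with the 1/25 encoder time base used below, pts_out = 33 * (1/25) / (1/90000) = 33 * 3600 = 118800, which matches the figure in the comment above (that run evidently wrote to a 1/90000 stream time base; an FLV stream normally uses 1/1000, which would give 33 * 40 = 1320).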
  
static void add_stream(OutputStream *ost, AVFormatContext *oc,  AVCodec **codec,  enum AVCodecID codec_id) 
//1. *codec = avcodec_find_encoder(codec_id);   2. ost->st = avformat_new_stream(oc, NULL);   3. ost->enc = c = avcodec_alloc_context3(*codec);   4. fill in AVCodecContext *c
{  
    AVCodecContext *c;  
    int i;  
  
    *codec = avcodec_find_encoder(codec_id);  
    if (!(*codec)) {  fprintf(stderr, "Could not find encoder for '%s'\n",  avcodec_get_name(codec_id));  exit(1);  }  
  
    ost->st = avformat_new_stream(oc, NULL);  
    if (!ost->st) {  fprintf(stderr, "Could not allocate stream\n");  exit(1);  }  
	
    ost->st->id = oc->nb_streams-1;  
    c = avcodec_alloc_context3(*codec);  
    if (!c) {  fprintf(stderr, "Could not alloc an encoding context\n");  exit(1);  }  
	ost->enc = c;  
  
	switch ((*codec)->type) {  
	case AVMEDIA_TYPE_AUDIO:  
		c->sample_fmt  = (*codec)->sample_fmts ?  
			(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;  
		c->bit_rate    = 64000;  
		c->sample_rate = 44100;  
		if ((*codec)->supported_samplerates) {  
			c->sample_rate = (*codec)->supported_samplerates[0];  
			for (i = 0; (*codec)->supported_samplerates[i]; i++) {  
				if ((*codec)->supported_samplerates[i] == 44100)  
					c->sample_rate = 44100;  
			}  
		}  
		c->channel_layout = AV_CH_LAYOUT_STEREO;  
		if ((*codec)->channel_layouts) {  
			c->channel_layout = (*codec)->channel_layouts[0];  
			for (i = 0; (*codec)->channel_layouts[i]; i++) {  
				if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)  
					c->channel_layout = AV_CH_LAYOUT_STEREO;  
			}  
		}  
		c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);  
		ost->st->time_base = (AVRational){ 1, c->sample_rate };  
		break;  
  
	case AVMEDIA_TYPE_VIDEO:  
		c->codec_id = codec_id;  
  
		c->bit_rate = 400000;  
		/* Resolution must be a multiple of two. */  
		c->width    = 640;  
		c->height   = 480;  
		/* timebase: This is the fundamental unit of time (in seconds) in terms 
		 * of which frame timestamps are represented. For fixed-fps content, 
		 * timebase should be 1/framerate and timestamp increments should be 
		 * identical to 1. */  
		ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };  
		c->time_base       = ost->st->time_base;  
  
		c->gop_size      = 12; /* emit one intra frame every twelve frames at most */  
		c->pix_fmt       = STREAM_PIX_FMT;  
		if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {  
			/* just for testing, we also add B-frames */  
			c->max_b_frames = 2;  
		}  
		if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {  
			/* Needed to avoid using macroblocks in which some coeffs overflow. 
			 * This does not happen with normal video, it just happens here as 
			 * the motion of the chroma plane does not match the luma plane. */  
			c->mb_decision = 2;  
		}  
	break;  
  
	default:  
		break;  
	}  
  
	/* Some formats want stream headers to be separate. */  
	if (oc->oformat->flags & AVFMT_GLOBALHEADER)  
		c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;  //if the container wants a separate global header, the codec context must emit one
}  






/**************************************************************************/  
/* video output */  
  
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)  //allocate an AVFrame with the given pixel format and size
{  
	AVFrame *picture;  
	int ret;  
  
	picture = av_frame_alloc();  //this only allocates the AVFrame itself, not the data buffers.
	//Those must be allocated through other means, e.g. with av_frame_get_buffer() or manually.
	if (!picture)  
		return NULL;  
  
	picture->format = pix_fmt;  
	picture->width  = width;  
	picture->height = height;  
  
	/* allocate the buffers for the frame data */  
	ret = av_frame_get_buffer(picture, 32);  //allocate the data buffers for the frame
	if (ret < 0) {  
		fprintf(stderr, "Could not allocate frame data.\n");  
		exit(1);  
	}  
  
	return picture;  
}  
  
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)  
//1. avcodec_open2(c, codec, &opt);   2. allocate and init a re-usable frame   3. prepare ost->tmp_frame   4. avcodec_parameters_from_context
{  
	int ret;  
	AVCodecContext *c = ost->enc;  
	AVDictionary *opt = NULL;  
  
	av_dict_copy(&opt, opt_arg, 0);  
  
	/* open the codec */  
	ret = avcodec_open2(c, codec, &opt);  
	av_dict_free(&opt);  
	if (ret < 0) {  
		fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));  
		exit(1);  
	}  
  
	/* allocate and init a re-usable frame */  
	ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);  
	if (!ost->frame) {  fprintf(stderr, "Could not allocate video frame\n");  exit(1);  }  
	//printf("ost->frame alloc success fmt=%d w=%d h=%d\n",c->pix_fmt,c->width, c->height);  
  
  
	/* If the output format is not YUV420P, then a temporary YUV420P 
	 * picture is needed too. It is then converted to the required 
	 * output format. */  
	ost->tmp_frame = NULL;  
	if (c->pix_fmt != AV_PIX_FMT_YUV420P) {  
		ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);  
		if (!ost->tmp_frame) {  
			fprintf(stderr, "Could not allocate temporary picture\n");  
			exit(1);  
		}  
	}  
  
	/* copy the stream parameters to the muxer */  
	ret = avcodec_parameters_from_context(ost->st->codecpar, c);  
	if (ret < 0) {  
		fprintf(stderr, "Could not copy the stream parameters\n");  
		exit(1);  
	}  
}  
  


// encode one video frame and send it to the muxer  return 1 when encoding is finished, 0 otherwise 
static int write_video_frame1(AVFormatContext *oc, OutputStream *ost,AVFrame *frame)  
//1. avcodec_encode_video2   2. hand the packet to write_frame()
{  
	int ret;  
	AVCodecContext *c;  
	int got_packet = 0;  
	AVPacket pkt = { 0 };  
  
	if(frame==NULL)  return 1;  
  
	c = ost->enc;  
	av_init_packet(&pkt);  
  
  
	/* encode the image */  
	ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);  //pkt.pts is carried over from frame.pts
	if (ret < 0) {  fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));  exit(1);  }  
  
	printf("---video- pkt.pts=%s     ",av_ts2str(pkt.pts));  //av_ts2str即将包含时间戳的int64_t变成char*buf    起始就是输出帧序号
	if (pkt.pts == 0) printf("----st.num=%d st.den=%d codec.num=%d codec.den=%d---------\n",ost->st->time_base.num,ost->st->time_base.den,  c->time_base.num,c->time_base.den);  
	//输出流的AVRational 和 编码器的AVRational
	
	if (got_packet) {  ret = write_frame(oc, &c->time_base, ost->st, &pkt);  }
	else {  ret = 0;  }  
  
	if (ret < 0) {  fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));  exit(1);  }  
  
	return (frame || got_packet) ? 0 : 1;  
}  
  
  
static AVFrame *get_video_frame1(OutputStream *ost,IntputDev* input,int *got_pic)  
//1. av_frame_make_writable   2. av_read_frame -- avcodec_decode_video2 -- sws_scale
//data flow: input->in_packet -> input->pFrame -> ost->frame
{  
  
	int ret, got_picture;  
	AVCodecContext *c = ost->enc;  
	AVFrame * ret_frame=NULL;  
	if (av_compare_ts(ost->next_pts, c->time_base,  
					  STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)  //once the next pts passes STREAM_DURATION, return NULL to stop recording
		return NULL;  
  
	/* when we pass a frame to the encoder, it may keep a reference to it 
	 * internally; make sure we do not overwrite it here */  
	if (av_frame_make_writable(ost->frame) < 0)  
		exit(1);  
	  
  
	if(av_read_frame(input->v_ifmtCtx, input->in_packet)>=0){  
		if(input->in_packet->stream_index==input->videoindex){  
			ret = avcodec_decode_video2(input->pCodecCtx, input->pFrame, &got_picture, input->in_packet);  
			*got_pic=got_picture;  
  
			if(ret < 0){  
				printf("Decode Error.\n");  
				av_free_packet(input->in_packet);  
				return NULL;  
			}  
			if(got_picture){  
				sws_scale(input->img_convert_ctx, (const unsigned char* const*)input->pFrame->data, input->pFrame->linesize, 0, input->pCodecCtx->height, ost->frame->data,  ost->frame->linesize);  
				ost->frame->pts =ost->next_pts++;  
				ret_frame= ost->frame;  
			}  
		}  
		av_free_packet(input->in_packet);  
	}  
	return ret_frame;  
}  

static void close_stream(AVFormatContext *oc, OutputStream *ost)  
{  
	avcodec_free_context(&ost->enc);  
	av_frame_free(&ost->frame);  
	av_frame_free(&ost->tmp_frame);  
	sws_freeContext(ost->sws_ctx);  
	swr_free(&ost->swr_ctx);  
}  
  
/**************************************************************/  
/* media file output */  
  
int main(int argc, char **argv)  
{  	
	  		
	int ret;  
	int have_video = 0;  
	int encode_video = 0;   
  
//********add camera read***********//  //input side
	IntputDev video_input = { 0 };  //aggregates everything about the input device


	avdevice_register_all();  
	av_register_all();
	
	AVCodecContext  *pCodecCtx;  
	AVCodec         *pCodec;  
	AVFormatContext *v_ifmtCtx = avformat_alloc_context();  
	
	AVInputFormat *ifmt=av_find_input_format("video4linux2");  
	if(avformat_open_input(&v_ifmtCtx,"/dev/video1",ifmt,NULL) != 0){
		printf("Couldn't open input stream./dev/video1\n");
		return -1;
	}  
	 
	if(avformat_find_stream_info(v_ifmtCtx,NULL)<0){
		printf("Couldn't find stream information.\n");
		return -1;
	}  


	int videoindex=-1;  
	for(int i=0; i<v_ifmtCtx->nb_streams; i++){//find video stream index
		if(v_ifmtCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}  
		if(i == v_ifmtCtx->nb_streams-1){
			printf("Couldn't find a video stream.\n");
			return -1;
		}
	}		
	
	pCodecCtx=v_ifmtCtx->streams[videoindex]->codec;  
	printf("pCodecCtx->width=%d pCodecCtx->height=%d \n",pCodecCtx->width, pCodecCtx->height); 
	
	if ((pCodec=avcodec_find_decoder(pCodecCtx->codec_id)) == NULL){
		printf("Codec not found.\n");  
		return -1;  
	}
	
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
		printf("Could not open codec.\n");return -1;
	}  
  
  
  
	AVFrame *pFrame 	= av_frame_alloc();  
	AVFrame *pFrameYUV 	= av_frame_alloc();   
	unsigned char *out_buffer=(unsigned char *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));  
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);  

	struct SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);   
	AVPacket *in_packet = NULL;
	if ((in_packet = (AVPacket *)av_malloc(sizeof(AVPacket))) == NULL){
		printf("error while av_malloc");
		return -1;
	}  


  
	//wire up video_input and its members
		video_input.img_convert_ctx=img_convert_ctx;  
		video_input.in_packet=in_packet;  
		video_input.pCodecCtx=pCodecCtx;  
		video_input.pCodec=pCodec;  
		video_input.v_ifmtCtx=v_ifmtCtx;  	//AVFormatContext *
		video_input.videoindex=videoindex;  
		video_input.pFrame=pFrame;  		//decoded from the packet
		video_input.pFrameYUV=pFrameYUV;  	//pFrame -> sws_scale -> pFrameYUV
	


  
//***********************输出
	OutputStream video_st = { 0 }; //the "ost" passed around below
	const char *filename = "video2out.flv";
	
	AVFormatContext *oc;  
	AVCodec *video_codec;//encoder on the output side (the AVCodec *pCodec above is the input-side decoder)
	avformat_alloc_output_context2(&oc, NULL, NULL, filename); 
	if (!oc) {
		printf("Could not deduce output format from file extension: using MPEG.\n");
		avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);	//fall back to MPEG and continue
	}
	if (!oc)  return 1;
	
	AVOutputFormat *fmt = oc->oformat;  //AVFormatContext-> AVOutputFormat *oformat
	/* Add the audio and video streams using the default format codecs and initialize the codecs. */  
	if (fmt->video_codec != AV_CODEC_ID_NONE) {  
		add_stream(&video_st, oc, &video_codec, fmt->video_codec);  //OutputStream *ost, AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id
		/* adds the output stream (ost->st) and allocates + fills the AVCodecContext (ost->enc): */
		//1. *codec = avcodec_find_encoder(codec_id);   2. ost->st = avformat_new_stream(oc, NULL);
		//3. ost->enc = c = avcodec_alloc_context3(*codec);   4. fill in AVCodecContext *c
		have_video = 1;  
		encode_video = 1;  
	}  

  
	/* Now that all the parameters are set, we can open the audio and  video codecs and allocate the necessary encode buffers. */  
	if (have_video)  open_video(oc, video_codec, &video_st, NULL);  
	//1. avcodec_open2(c, codec, &opt);   2. allocate and init a re-usable frame   3. prepare ost->tmp_frame   4. avcodec_parameters_from_context
  
	av_dump_format(oc, 0, filename, 1);  
  
	/* open the output file, if needed (creates the AVIOContext) */  
	if (!(fmt->flags & AVFMT_NOFILE)) {  
		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);	//keep ret so the error string below is meaningful
		if (ret < 0){
			fprintf(stderr, "Could not open '%s': %s\n", filename,  av_err2str(ret));
			return -1;
		}  
	}  
  
	/* Write the stream header, if any. */  
	ret = avformat_write_header(oc, NULL);
	if (ret < 0){
		fprintf(stderr, "Error occurred when opening output file: %s\n",  av_err2str(ret));
		return 1;
	}  
  
	int got_pic;  
	while (encode_video ) {  
		/* select the stream to encode */  
		AVFrame *frame=get_video_frame1(&video_st,&video_input,&got_pic);  //frame->pts is ost->next_pts counting up from 0, i.e. the frame number
		//0. av_compare_ts ends recording after STREAM_DURATION   1. av_frame_make_writable   2. av_read_frame -- avcodec_decode_video2 -- sws_scale -> the decoded, converted frame
		if(!got_pic)  
		{  
			usleep(10000);  //10 ms; usleep() takes microseconds (declared in <unistd.h>)
			continue;  
		}  
		encode_video = !write_video_frame1(oc, &video_st,frame);  //write_video_frame1 returns 1 once the preset duration is reached, ending the loop
	}  
  
  
 
	av_write_trailer(oc);  


//end  
	sws_freeContext(video_input.img_convert_ctx);  
  
	avcodec_close(video_input.pCodecCtx);  
	av_free(video_input.pFrameYUV);  
	av_free(video_input.pFrame);      
	avformat_close_input(&video_input.v_ifmtCtx);  
	  
  
	/* Close each codec. */  
	if (have_video)  close_stream(oc, &video_st);  
  
	if (!(fmt->flags & AVFMT_NOFILE))  avio_closep(&oc->pb);  /* Close the output file. */  
  
	/* free the stream */  
	avformat_free_context(oc);  
  
	return 0;  
}  
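
After roughly 10 seconds the loop ends and video2out.flv is written; it can be checked with the tools that ship with FFmpeg:

ffprobe video2out.flv
ffplay video2out.flv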


Reposted from blog.csdn.net/qq_42024067/article/details/103520835