Streaming to RTMP with FFmpeg C code

1. Local file → RTMP push → URL

This post documents the simplest possible FFmpeg-based streamer (simplest ffmpeg streamer), using RTMP as the example. A streamer's job is to push local video data to a streaming-media server.

Starting from this streamer, many variants can be built with small modifications. For example:

  • Change the input file to a network stream URL to get a stream relay.
  • Change the input to a callback (reading from memory) to push video data held in memory; see the sketch after this list.
  • Change the input to a system device (via libavdevice) and add encoding to get a real-time streamer (live broadcasting).
    Note: this program does not transcode the video.
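For the memory-input variant, the usual approach is a custom AVIOContext with a read callback. Below is a minimal sketch, not part of the original program; the MemoryInput struct, its fields, and read_packet are made up for illustration (mem is a caller-filled MemoryInput):

typedef struct MemoryInput {	//hypothetical state for an in-memory media buffer
    const uint8_t *data;
    int size;
    int pos;
} MemoryInput;

static int read_packet(void *opaque, uint8_t *buf, int buf_size)	//read callback for avio_alloc_context()
{
    MemoryInput *mem = (MemoryInput *)opaque;
    int n = FFMIN(buf_size, mem->size - mem->pos);
    if (n <= 0)
        return AVERROR_EOF;	//no bytes left
    memcpy(buf, mem->data + mem->pos, n);
    mem->pos += n;
    return n;
}

/* in main(), instead of opening in_filename: */
unsigned char *avio_buf = (unsigned char *)av_malloc(4096);
AVIOContext *avio_ctx = avio_alloc_context(avio_buf, 4096, 0, &mem, read_packet, NULL, NULL);
ifmt_ctx = avformat_alloc_context();
ifmt_ctx->pb = avio_ctx;	//the demuxer now reads through our callback
ret = avformat_open_input(&ifmt_ctx, NULL, NULL, NULL);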


1. Container format:
  The container RTMP requires is FLV; similarly, when pushing over UDP the container is MPEG-TS ("mpegts").
2. Delay:
  Stream data has to be paced when sent. Otherwise FFmpeg, which processes data very quickly, would push everything out almost instantly, and the streaming server cannot absorb that. The data must therefore be sent at the video's actual frame rate [this streamer sleeps between video frames with av_usleep()].
3. PTS/DTS:
  A raw elementary stream with no container (e.g. raw H.264) carries no PTS or DTS. When sending such data you must compute and fill in the AVPacket's pts, dts, duration, etc. yourself; a worked example follows this list.
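A worked example of points 2 and 3 (assuming a 25 fps input and FLV's 1/1000 stream time base, which is the case the code below computes):

	int64_t calc_duration = AV_TIME_BASE / 25;  // 1000000/25 = 40000 us between frames
	// in stream time-base ticks: 40000 / (0.001 * 1000000) = 40 ticks per frame,
	// so frame 10 gets pts = 10 * 40 = 400, and the sender sleeps until
	// 10 * 40000 us of wall-clock time have elapsed before pushing that packet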

The implementation code follows:

/**
 * This example pushes a local video file to a streaming-media server (RTMP as the example).
 * It is the simplest possible tutorial on streaming with FFmpeg.
**/
 
#include <stdio.h>
 
 
#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>
#include <libavutil/time.h>

 
int main(int argc, char* argv[])
{
	AVOutputFormat *ofmt = NULL;
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
	AVPacket pkt;
	const char *in_filename, *out_filename;
	int ret, i;
	int videoindex=-1;
	int frame_index=0;
	int64_t start_time=0;
	//in_filename  = "cuc_ieschool.mov";
	//in_filename  = "cuc_ieschool.mkv";
	//in_filename  = "cuc_ieschool.ts";
	//in_filename  = "cuc_ieschool.mp4";
	//in_filename  = "cuc_ieschool.h264";
	in_filename  = "cuc_ieschool.flv";//Input file URL
	//in_filename  = "shanghai03_p.h264";
	
	out_filename = "rtmp://localhost/publishlive/livestream";//Output URL [RTMP]
	//out_filename = "rtp://233.233.233.233:6666";//Output URL [UDP]
 
	av_register_all();
	avformat_network_init();//Network
	//Input
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		printf( "Could not open input file.");
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		printf( "Failed to retrieve input stream information");
		goto end;
	}
 
	for(i=0; i<ifmt_ctx->nb_streams; i++) 
		if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}
 
	av_dump_format(ifmt_ctx, 0, in_filename, 0);
 
//Output
	
	avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_filename); //RTMP
	//avformat_alloc_output_context2(&ofmt_ctx, NULL, "mpegts", out_filename);//UDP
	/*
		Container format
		RTMP uses the FLV container, so the output must be opened with the "flv" muxer.
		Likewise, other streaming protocols need their container specified; e.g. when pushing over UDP, use "mpegts".
	*/
 
	if (!ofmt_ctx) {
		printf( "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt = ofmt_ctx->oformat;//AVFormatContext->AVOutputFormat *ofmt
	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
		//Create an output AVStream for each input AVStream
		AVStream *in_stream = ifmt_ctx->streams[i];
		AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
		if (!out_stream) {
			printf( "Failed allocating output stream\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		//Copy the settings of AVCodecContext
		ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
		if (ret < 0) {
			printf( "Failed to copy context from input to output stream codec context\n");
			goto end;
		}
		out_stream->codec->codec_tag = 0;
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	av_dump_format(ofmt_ctx, 0, out_filename, 1);
	
	//Open output URL
	if (!(ofmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			printf( "Could not open output URL '%s'", out_filename);
			goto end;
		}
	}
	//Write file header
	ret = avformat_write_header(ofmt_ctx, NULL);
	if (ret < 0) {
		printf( "Error occurred when opening output URL\n");
		goto end;
	}
 
	start_time=av_gettime();
	while (1) {
		AVStream *in_stream, *out_stream;
		//Get an AVPacket
		ret = av_read_frame(ifmt_ctx, &pkt);
		if (ret < 0)
			break;
		
		
		/*
			PTS/DTS
			A raw elementary stream with no container (e.g. raw H.264) carries no PTS or DTS.
			When sending such data you must compute and fill in the AVPacket's pts, dts, duration, etc. yourself. This has not been studied in depth here; the simple code below is a first cut.
		*/
		
		//FIX:No PTS (Example: Raw H.264)
		//Simple Write PTS
		if(pkt.pts==AV_NOPTS_VALUE){
			//Write PTS
			AVRational time_base1=ifmt_ctx->streams[videoindex]->time_base;
			//Duration between 2 frames (us)
			int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(ifmt_ctx->streams[videoindex]->r_frame_rate);
			//#define AV_TIME_BASE   1000000   (the 10^6 factor between seconds and microseconds)
			//#define AV_TIME_BASE_Q   (AVRational){1, AV_TIME_BASE}

			//Parameters
			pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
			pkt.dts=pkt.pts;
			pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);	//Duration of this packet in AVStream->time_base units, 0 if unknown.
																							//Equals next_pts - this_pts in presentation order.
		}
		//Important:Delay
		if(pkt.stream_index==videoindex){
			AVRational time_base=ifmt_ctx->streams[videoindex]->time_base;
			AVRational time_base_q={1,AV_TIME_BASE};
			int64_t pts_time = av_rescale_q(pkt.dts, time_base, time_base_q);
			int64_t now_time = av_gettime() - start_time;
			if (pts_time > now_time)
				av_usleep(pts_time - now_time);  //time this frame should appear at - time already elapsed = time to wait
			/*
				Delay
				Stream data has to be paced when sent. Otherwise FFmpeg, which processes data very quickly, would push everything out almost instantly, and the streaming server cannot absorb that.
				The data must therefore be sent at the video's actual frame rate. This streamer sleeps between video frames with av_usleep().
			*/
 
		}
 
		in_stream  = ifmt_ctx->streams[pkt.stream_index];
		out_stream = ofmt_ctx->streams[pkt.stream_index];
		/* copy packet */
		//Convert PTS/DTS from the input stream's time base to the output stream's
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		pkt.pos = -1;
		//Print to Screen
		if(pkt.stream_index==videoindex){
			printf("Send %8d video frames to output URL\n",frame_index);
			frame_index++;
		}
		//ret = av_write_frame(ofmt_ctx, &pkt);
		ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
 
		if (ret < 0) {
			printf( "Error muxing packet\n");
			break;
		}
		
		av_free_packet(&pkt);
	}
	
	//Write file trailer
	av_write_trailer(ofmt_ctx);
	
end:
	avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	
	avformat_free_context(ofmt_ctx);
	if (ret < 0 && ret != AVERROR_EOF) {
		printf( "Error occurred.\n");
		return -1;
	}
	return 0;
}
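One easy improvement over the generic printf messages above (a sketch, not part of the original code): FFmpeg can render the numeric ret into readable text with av_strerror().

#include <libavutil/error.h>

static void print_av_error(const char *msg, int err)
{
	char buf[AV_ERROR_MAX_STRING_SIZE];
	av_strerror(err, buf, sizeof(buf));	//fills buf with a description of the AVERROR code
	fprintf(stderr, "%s: %s\n", msg, buf);
}

//e.g.:  if (ret < 0) print_av_error("Could not open input file", ret);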

2. USB camera → RTMP push → URL

/*
	2020-01-06: picture quality is too low; the latency problem is still unsolved (a sketch addressing both is at the end of this section)
*/


//	gcc ctuiliu.c -o 1 -I/root/ffmpeg/include/ -L/root/ffmpeg/lib -lavcodec -lavformat -lavdevice -lavutil -lswscale -lSDL -lswresample
//	./1 
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <fcntl.h>
#include <malloc.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <libavutil/time.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>  
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
#include <libswscale/swscale.h>

#define STREAM_DURATION   100 /* preset duration in seconds (used as an int) */
#define STREAM_FRAME_RATE   25 /* 25 images/s */
#define STREAM_PIX_FMT   AV_PIX_FMT_YUV420P /* default pix_fmt */

#define DEV_TYPE		"video4linux2"
#define DEV_NAME		"/dev/video1"
#define MAX_CHANNEL 	(4)
#define AV_IO_BUF_SIZE	(96*1024)
#define CLEAR(x) 		memset(&(x), 0, sizeof(x))

#define camera_in

#if 0
	#define out_flv
	#undef  out_url
#else
	#define out_url
	#undef  out_flv
#endif

// a wrapper around a single output AVStream  
typedef struct OutputStream {  
    AVStream *st;  
    AVCodecContext *enc;  
  
    int64_t next_pts;  /* pts of the next frame that will be generated */  
    int samples_count;  
  
    AVFrame *frame;  
    AVFrame *tmp_frame;  
  
    float t, tincr, tincr2;  
  
    struct SwsContext *sws_ctx;  
    struct SwrContext *swr_ctx;  
} OutputStream;  //compared with the reference struct in FFmpeg's muxing.c example, the later fields are dropped; only the first few remain
  
  
typedef struct IntputDev {  
	AVCodecContext  	*pCodecCtx;  		//pCodecCtx=v_ifmtCtx->streams[videoindex]->codec;  
	AVCodec         	*pCodec;  			//pCodec=avcodec_find_decoder(pCodecCtx->codec_id);  
	AVFormatContext 	*v_ifmtCtx;  		//avformat_alloc_context +  avformat_open_input(&v_ifmtCtx,"/dev/video0",ifmt,NULL)
	int  				videoindex;  
	struct SwsContext 	*img_convert_ctx;  
	AVPacket 			*in_packet;  		//(AVPacket *)av_malloc(sizeof(AVPacket)); --> filled by av_read_frame
	AVFrame 			*pFrame,*pFrameYUV;  //av_frame_alloc ----> decoding yields pFrame --(format conversion)--> pFrameYUV;  avpicture_fill((AVPicture *)pFrameYUV, out_buffer..) 
}IntputDev;  

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)  
{  
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;  
  
    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",  
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),  
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),  
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),  
           pkt->stream_index);  
}  

static void add_stream(OutputStream *ost, AVFormatContext *oc,  AVCodec **codec,  enum AVCodecID codec_id) 
/*
	An OutputStream needs:
	1. OutputStream->AVCodecContext *enc == AVCodecContext <- AVCodec <- codec_id,
	   filled with the essential parameters: codec_id, bit_rate, width, height, etc.
	2. OutputStream->AVStream *st = avformat_new_stream(oc, NULL),
	   after which ost->st->id is updated.
*/ 
{  
    AVCodecContext *c;  
    int i;  
  
    *codec = avcodec_find_encoder(codec_id);  
    if (!(*codec)) {  fprintf(stderr, "Could not find encoder for '%s'\n",  avcodec_get_name(codec_id));  exit(1);  }  
  
    ost->st = avformat_new_stream(oc, NULL);  
    if (!ost->st) {  fprintf(stderr, "Could not allocate stream\n");  exit(1);  }  
	
    ost->st->id = oc->nb_streams-1;  
    c = avcodec_alloc_context3(*codec);  
    if (!c) {  fprintf(stderr, "Could not alloc an encoding context\n");  exit(1);  }  
	ost->enc = c;  
  
	switch ((*codec)->type) {  
	case AVMEDIA_TYPE_AUDIO:  
		c->sample_fmt  = (*codec)->sample_fmts ?  
			(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;  
		c->bit_rate    = 64000;  
		c->sample_rate = 44100;  
		if ((*codec)->supported_samplerates) {  
			c->sample_rate = (*codec)->supported_samplerates[0];  
			for (i = 0; (*codec)->supported_samplerates[i]; i++) {  
				if ((*codec)->supported_samplerates[i] == 44100)  
					c->sample_rate = 44100;  
			}  
		}  
		c->channel_layout = AV_CH_LAYOUT_STEREO;  
		if ((*codec)->channel_layouts) {  
			c->channel_layout = (*codec)->channel_layouts[0];  
			for (i = 0; (*codec)->channel_layouts[i]; i++) {  
				if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)  
					c->channel_layout = AV_CH_LAYOUT_STEREO;  
			}  
		}  
		c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);  
		ost->st->time_base = (AVRational){ 1, c->sample_rate };  
		break;  
  
	case AVMEDIA_TYPE_VIDEO:  
		c->codec_id = codec_id;  
  
		c->bit_rate = 400000;  
		/* Resolution must be a multiple of two. */  
		c->width    = 640;  
		c->height   = 480;  
		/* timebase: This is the fundamental unit of time (in seconds) in terms 
		 * of which frame timestamps are represented. For fixed-fps content, 
		 * timebase should be 1/framerate and timestamp increments should be 
		 * identical to 1. */  
		ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };  
		c->time_base       = ost->st->time_base;  
  
		c->gop_size      = 12; /* emit one intra frame every twelve frames at most */  
		c->pix_fmt       = STREAM_PIX_FMT;  
		if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {  
			/* just for testing, we also add B-frames */  
			c->max_b_frames = 2;  
		}  
		if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {  
			/* Needed to avoid using macroblocks in which some coeffs overflow. 
			 * This does not happen with normal video, it just happens here as 
			 * the motion of the chroma plane does not match the luma plane. */  
			c->mb_decision = 2;  
		}  
	break;  
  
	default:  
		break;  
	}  
  
	/* Some formats want stream headers to be separate. */  
	if (oc->oformat->flags & AVFMT_GLOBALHEADER)  /*	Place global headers in extradata instead of every keyframe.	*/
		c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;  
		//if the AVFormatContext *oc's format needs a separate global header, the OutputStream's AVCodecContext must be flagged the same way
}  




/**************************************************************************/  
/* video output */  
  
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)  //allocate an AVFrame with the given pixel format and dimensions
{  
	AVFrame *picture;  
	int ret;  
  
	picture = av_frame_alloc();  //this only allocates the AVFrame itself, not the data buffers.
	//Those must be allocated through other means, e.g. with av_frame_get_buffer() or manually.
	if (!picture)  	return NULL;  
  
	picture->format = pix_fmt;  
	picture->width  = width;  
	picture->height = height;  
  
	/* allocate the buffers for the frame data */  
	ret = av_frame_get_buffer(picture, 32);  //allocate new buffer(s) for the audio or video data
	if (ret < 0) {  
		fprintf(stderr, "Could not allocate frame data.\n");  
		exit(1);  
	}  
  
	return picture;  
}  
  
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)  
/*
	1. avcodec_open2(AVCodecContext, AVCodec, &opt);
	2. allocate and init a re-usable frame      i.e. OutputStream *ost->frame
	3. prepare ost->tmp_frame                   i.e. OutputStream *ost->tmp_frame
	4. avcodec_parameters_from_context
*/
{  
	int ret;  
	AVCodecContext *c = ost->enc;  
	AVDictionary *opt = NULL;  
  
	av_dict_copy(&opt, opt_arg, 0);  
  
	/* open the codec */  
	ret = avcodec_open2(c, codec, &opt);  
	if (ret < 0) {  fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));  exit(1);  }  
	av_dict_free(&opt);  
	
  
	/* allocate and init a re-usable frame */  
	ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);  
	if (!ost->frame) {  fprintf(stderr, "Could not allocate video frame\n");  exit(1);  }  
	//printf("ost->frame alloc success fmt=%d w=%d h=%d\n",c->pix_fmt,c->width, c->height);  
  
  
	/* If the output format is not YUV420P, then a temporary YUV420P 
	 * picture is needed too. It is then converted to the required 
	 * output format. */  
	ost->tmp_frame = NULL;  
	if (c->pix_fmt != AV_PIX_FMT_YUV420P) {  
		ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);  
		if (!ost->tmp_frame) {  fprintf(stderr, "Could not allocate temporary picture\n");  exit(1);  }  
	}  
  
	/* copy the stream parameters to the muxer */  
	ret = avcodec_parameters_from_context(ost->st->codecpar, c);  
	if (ret < 0) {  fprintf(stderr, "Could not copy the stream parameters\n");  exit(1);  }  
}  
  
static AVFrame *get_video_frame1(OutputStream *ost,IntputDev* input,int *got_pic)  
//Data flow: (read) input->in_packet --(decode)--> input->pFrame --(sws_scale)--> ost->frame
{  
  
	int ret, got_picture;  
	AVCodecContext *c = ost->enc;  
	AVFrame * ret_frame=NULL;  
	if (av_compare_ts(ost->next_pts, c->time_base,  STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)  //return NULL once the timestamp exceeds the preset duration STREAM_DURATION
		return NULL;  
  
	/* when we pass a frame to the encoder, it may keep a reference to it 
	 * internally; make sure we do not overwrite it here */  
	if (av_frame_make_writable(ost->frame) < 0)  
		exit(1);  
	  
  
	if(av_read_frame(input->v_ifmtCtx, input->in_packet)>=0){  
		if(input->in_packet->stream_index==input->videoindex){  
			ret = avcodec_decode_video2(input->pCodecCtx, input->pFrame, &got_picture, input->in_packet);  
			*got_pic=got_picture;  
  
			if(ret < 0){  
				printf("Decode Error.\n");  
				av_free_packet(input->in_packet);  
				return NULL;  
			}  
			if(got_picture){  
				sws_scale(input->img_convert_ctx, (const unsigned char* const*)input->pFrame->data, input->pFrame->linesize, 0, input->pCodecCtx->height, 
													ost->frame->data,  ost->frame->linesize);  
				ost->frame->pts =ost->next_pts++;  
				ret_frame= ost->frame;  
			}  
		}  
		av_free_packet(input->in_packet);  
	}  
	return ret_frame;  
}  
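/* A sketch (not in the original) of the same decode step with the newer
 * send/receive API, since avcodec_decode_video2() is deprecated in FFmpeg 3.1+.
 * Returns 1 and fills frame on success, 0 when the decoder wants more input,
 * a negative AVERROR on failure. */
static int decode_one(AVCodecContext *dec, AVPacket *pkt, AVFrame *frame)
{
	int ret = avcodec_send_packet(dec, pkt);	/* feed one packet to the decoder */
	if (ret < 0)
		return ret;
	ret = avcodec_receive_frame(dec, frame);	/* try to pull one decoded frame */
	if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
		return 0;
	return ret < 0 ? ret : 1;
}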


// encode one video frame and send it to the muxer 
int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt);  //forward declaration; defined below
static int write_video_frame1(AVFormatContext *oc, OutputStream *ost,AVFrame *frame)  
//1. avcodec_encode_video2	2. hand the packet to write_frame()
{  
	int ret;  
	AVCodecContext *c = ost->enc;   //AVCodecContext->time_base counts in frames; before writing, timestamps must be rescaled to OutputStream->st->time_base, the real stream time base
	int got_packet = 0;  
	AVPacket pkt = { 0 };  
  
	if(frame==NULL)  return 1;  
  
	av_init_packet(&pkt);  
  
  
	ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);  //reading avcodec_encode_video2 shows pkt.pts is copied from frame.pts
	if (ret < 0) {  fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));  exit(1);  }  
	
	//printf("---video- pkt.pts=%s     ",av_ts2str(pkt.pts));  //av_ts2str turns the int64_t timestamp into a char buf; at this stage it is simply the frame index
	if (pkt.pts == 0) printf("----st.num=%d   st.den=%d	  codec.num=%d   codec.den=%d---------\n",
							ost->st->time_base.num,ost->st->time_base.den,  c->time_base.num,c->time_base.den);  
	//print the output stream's AVRational and the encoder's AVRational only on the first packet
	
	if (got_packet) {  ret = write_frame(oc, &c->time_base, ost->st, &pkt);  }
	else {  ret = 0;  }  
  
	if (ret < 0) {  fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));  exit(1);  }  
  
	return (frame || got_packet) ? 0 : 1;  
}  
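/* Likewise, avcodec_encode_video2() is deprecated in FFmpeg 3.1+. A sketch
 * (not in the original) of the encode-and-mux step with the send/receive API;
 * it drains every packet the encoder has ready and muxes each one via the
 * write_frame() declared above. */
static int encode_and_write1(AVFormatContext *oc, OutputStream *ost, AVFrame *frame)
{
	AVCodecContext *c = ost->enc;
	int ret = avcodec_send_frame(c, frame);	/* frame==NULL flushes the encoder */
	while (ret >= 0) {
		AVPacket pkt = { 0 };
		av_init_packet(&pkt);
		ret = avcodec_receive_packet(c, &pkt);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			return 0;	/* nothing more ready right now */
		if (ret < 0)
			return ret;
		ret = write_frame(oc, &c->time_base, ost->st, &pkt);
	}
	return ret;
}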
  
int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)  //Definition at line 81 of file muxing.c.
//before writing, pkt->pts and pkt->stream_index have to be fixed up
{  
    /* rescale output packet timestamp values from codec (frame index) to stream timebase */  
    av_packet_rescale_ts(pkt, *time_base, st->time_base);  
			//e.g. with a 1/25 codec time base and a 1/90000 stream time base, frame 33 becomes pts 33*3600 = 118800
    pkt->stream_index = st->index;  
  
	log_packet(fmt_ctx, pkt);  
  
    return av_interleaved_write_frame(fmt_ctx, pkt);  
}  
  

static void close_stream(AVFormatContext *oc, OutputStream *ost)  
{  
	avcodec_free_context(&ost->enc);  
	av_frame_free(&ost->frame);  
	av_frame_free(&ost->tmp_frame);  
	sws_freeContext(ost->sws_ctx);  
	swr_free(&ost->swr_ctx);  
}  
  




int main(int argc, char* argv[])
{

	avdevice_register_all(); 
	av_register_all();
    avformat_network_init();
	const char *outUrl = "rtmp://192.168.126.128:1935/hls/test";

//**************** Input
#ifdef camera_in
	IntputDev video_input = { 0 };  //aggregated input state
	AVFormatContext *v_ifmtCtx = avformat_alloc_context(); 
	
	AVInputFormat *ifmt=av_find_input_format("video4linux2");  
	if(avformat_open_input(&v_ifmtCtx,"/dev/video1",ifmt,NULL) != 0){
		printf("Couldn't open input stream./dev/video1\n");
		return -1;
	}  
	//one could also skip av_find_input_format and let avformat_open_input detect the format on its own
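	/* Optional (a sketch, not in the original): the video4linux2 demuxer accepts
	   "video_size", "framerate" and "input_format" options, so a specific capture
	   mode can be requested instead of the driver default, e.g.:

		AVDictionary *cam_opts = NULL;
		av_dict_set(&cam_opts, "video_size", "640x480", 0);
		av_dict_set(&cam_opts, "framerate", "25", 0);
		av_dict_set(&cam_opts, "input_format", "yuyv422", 0);
		avformat_open_input(&v_ifmtCtx, "/dev/video1", ifmt, &cam_opts);
		av_dict_free(&cam_opts);
	*/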
	 
	if(avformat_find_stream_info(v_ifmtCtx,NULL)<0){
		printf("Couldn't find stream information.\n");
		return -1;
	}  

	av_dump_format(v_ifmtCtx, 0, "/dev/video1", 0);

	
	int videoindex=-1;  
	for(int i=0; i<v_ifmtCtx->nb_streams; i++){//find video stream index
		if(v_ifmtCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}  
		if(i == v_ifmtCtx->nb_streams-1){
			printf("Couldn't find a video stream.\n");
			return -1;
		}
	}	
		

	
	AVCodecContext  *pCodecCtx	= v_ifmtCtx->streams[videoindex]->codec;  
	AVCodec         *pCodec		= avcodec_find_decoder(pCodecCtx->codec_id); 
	printf("pCodecCtx->width=%d pCodecCtx->height=%d \n",pCodecCtx->width, pCodecCtx->height); 

	
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
		printf("Could not open codec.\n");return -1;
	}  
	printf("open codec success!\n");
	
	
//prepare pFrame && pFrameYUV (with a data buffer attached) && SwsContext  
/*	guess: pFrame gets its data buffer automatically during decoding, while sws_scale assumes the destination pFrameYUV already owns a buffer	*/
	AVFrame *pFrame 	= av_frame_alloc();  
	AVFrame *pFrameYUV 	= av_frame_alloc();   
	unsigned char *out_buffer=(unsigned char *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));  
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);  
	//alternatively, av_frame_get_buffer allocates the buffer from the AVFrame's own width/height

	struct SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);   
	if (pCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P) printf("USBcamera's raw pix_fmt == pCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P == 0\n");
	if (pCodecCtx->pix_fmt == AV_PIX_FMT_YUYV422 ) printf("USBcamera's raw pix_fmt == pCodecCtx->pix_fmt == AV_PIX_FMT_YUYV422 == 1 \n");
	/* some enum AVPixelFormat values:
	{
		AV_PIX_FMT_NONE 	-1
		AV_PIX_FMT_YUV420P 	 0
		AV_PIX_FMT_YUYV422 	 1	
	}
	*/


	AVPacket *in_packet = (AVPacket *)av_malloc(sizeof(AVPacket)); 
	

//wire up IntputDev video_input with its members
	video_input.img_convert_ctx=img_convert_ctx;  
	video_input.in_packet=in_packet;  
	video_input.pCodecCtx=pCodecCtx;  
	video_input.pCodec=pCodec;  
	video_input.v_ifmtCtx=v_ifmtCtx;  	//AVFormatContext *
	video_input.videoindex=videoindex;  
	video_input.pFrame=pFrame;  		//filled by decoding the packet
	video_input.pFrameYUV=pFrameYUV;  	//pFrame ->sws-> pFrameYUV

#endif


	int have_video = 0;
	int encode_video = 0;

	
//************************ Output to a file [AVFormatContext *oc   OutputStream video_st]
#ifdef out_flv

	printf("\n-----down------for flv.flv----------\n");	
	OutputStream video_st = { 0 }; 
	const char *filename = "video2flv.flv";
	
	AVFormatContext *oc;  
	AVCodec *video_codec;//encoder on the output side, as opposed to the input-side decoder AVCodec *pCodec above
	avformat_alloc_output_context2(&oc, NULL, NULL, filename); 
	if (!oc) {printf("Could not deduce output format from file extension: using MPEG.\n");avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);}  
	if (!oc)  return 1;  
	
	AVOutputFormat *fmt = oc->oformat;  //AVFormatContext-> AVOutputFormat *oformat
	printf("**************oc->oformat->video_codec=%d\n",fmt->video_codec);

	// Add the audio and video streams using the default format codecs and initialize the codecs. 
	if (fmt->video_codec != AV_CODEC_ID_NONE) {  
		add_stream(&video_st, oc, &video_codec, fmt->video_codec);  //OutputStream *ost, AVFormatContext *oc,  AVCodec **codec,  enum AVCodecID codec_id
		/*	create the video stream and get its AVCodecContext * (ost->enc)				//Definition at line 93 of file muxing.c.
		1.video_codec = avcodec_find_encoder(fmt->video_codec);
		2.ost->st = avformat_new_stream(ofmt_ctx, NULL);
		3.ost->enc = c = avcodec_alloc_context3(video_codec); 
		4.fill in AVCodecContext *c;  
			*/ 			
		printf("**************oc->oformat->video_codec=%d\n",fmt->video_codec);
		printf("*********video_codec->id=%d\n",video_codec->id);
		have_video = 1;  
		encode_video = 1;  
	}  

  
	// Now that all the parameters are set, we can open the audio and  video codecs and allocate the necessary encode buffers. 
	if (have_video)  open_video(oc, video_codec, &video_st, NULL);  //open the encoder on the output side
	//1.avcodec_open2(c, codec, &opt);    2.allocate and init a re-usable frame 3.prepar ost->tmp_frame 4.avcodec_parameters_from_context
	
	/* open the output file, if needed 					(creates the AVIOContext) */  
	if (!(fmt->flags & AVFMT_NOFILE)) {  
		if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0){
			fprintf(stderr, "Could not open '%s': %s\n", filename);
			return -1;
		}  
	}  
	
	av_dump_format(oc, 0, filename, 1); 
	printf("-----up-------for flv.flv----------\n");	

#endif	





//************************** Output to a URL, e.g. an nginx server ----[AVFormatContext *ofmt_ctx plays the role of oc  	OutputStream ost]--------------------
#ifdef out_url
	printf("\n-----down------for url----------\n");
	OutputStream ost = {0};
	AVCodec *video_codec;//output encoder; actually the same as video_codec above (video_codec->id=21) -> so both output targets create (and default-fill) the same kind of AVCodecContext
	AVFormatContext * ofmt_ctx = NULL;		
	if (avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", outUrl) < 0){
		avformat_close_input(&v_ifmtCtx);
		avformat_free_context(ofmt_ctx);
		printf("error while avformat_alloc_output_context2 in %d\n",__LINE__);
		return 1;
	}

	AVOutputFormat *fmt = ofmt_ctx->oformat;  //AVFormatContext-> AVOutputFormat *oformat
	if(fmt->video_codec != AV_CODEC_ID_NONE)//the format has a video codec -> create the corresponding video stream
	{
		add_stream(&ost, ofmt_ctx, &video_codec, fmt->video_codec);  
		/*	create the video stream and get its AVCodecContext * (ost->enc)				//Definition at line 93 of file muxing.c.
		1.video_codec = avcodec_find_encoder(fmt->video_codec);
		2.ost->st = avformat_new_stream(ofmt_ctx, NULL);
		3.ost->enc = c = avcodec_alloc_context3(video_codec); 
		4.fill in AVCodecContext *c;  
			*/
		printf("**************oc->oformat->video_codec=%d\n",fmt->video_codec);
		printf("*********video_codec->id=%d\n",video_codec->id);
		have_video = 1;  
		encode_video = 1;  
	}
	
	// Now that all the parameters are set, we can open the audio and  video codecs and allocate the necessary encode buffers. 
	if (have_video)  open_video(ofmt_ctx, video_codec, &ost, NULL); //open the encoder on the output side
	//1.avcodec_open2(c, codec, &opt);    2.allocate and init a re-usable frame 3.prepar ost->tmp_frame 4.avcodec_parameters_from_context

	
	/* open the output file, if needed 					(creates the AVIOContext) */  
	if (!(fmt->flags & AVFMT_NOFILE)) {  
		if (avio_open(&ofmt_ctx->pb, outUrl, AVIO_FLAG_WRITE) < 0){
			fprintf(stderr, "Could not open '%s'\n", outUrl); 
			return 1;  
		}  
	}

	av_dump_format(ofmt_ctx, 0, outUrl, 1);
	printf("-----up-------for flv.flv----------\n");	
	/*	Both output targets use the FLV container, so av_dump_format prints the same thing:
	Output #0, flv, to 'rtmp://192.168.126.128:1935/hls/test':
    Stream #0:0: Video: flv1, yuv420p, 352x288, q=2-31, 400 kb/s, 25 tbn
	*/
	//the two AVCodecContexts should presumably have identical contents; not verified
#endif

	printf("\n\nnow prepar finished\n\n");





	
//------------------------- Writing
#ifdef out_flv
	if (avformat_write_header(oc, 0) < 0){//write the file header to video2flv.flv
		printf("error while avformat_write_header");
		avformat_close_input(&v_ifmtCtx);
		avformat_free_context(oc);
		return 1;
	}
	printf("avformat_write_header Success! in %d\n",__LINE__);
	
	//------------- read + decode + convert + encode + write --------------------------
	int got_pic;	
	while(encode_video){
		/* select the stream to encode */  
		AVFrame *frame=get_video_frame1(&video_st,&video_input,&got_pic);  
		//av_read_frame -- avcodec_decode_video2 -- sws_scale ->->-> yields the decoded, converted ost->frame
		if(!got_pic)  
		{  
			usleep(10000);  //microseconds (millionths of a second)
			continue;  
		}  
		encode_video = !write_video_frame1(oc, &video_st,frame);
	}  
	printf("%d write success\n",__LINE__);	

	/* Write the trailer, if any. The trailer must be written before you 
	 * close the CodecContexts open when you wrote the header; otherwise 
	 * av_write_trailer() may try to use memory that was freed on 
	 * av_codec_close(). */   
	if (av_write_trailer(oc) < 0){
		printf( "Error occurred when av_write_trailer(oc)\n");
		return 1;
	}
#endif	


#ifdef out_url
	if (avformat_write_header(ofmt_ctx, 0) < 0){//write the header to the URL
		printf("error while avformat_write_header");
		avformat_close_input(&v_ifmtCtx);
		avformat_free_context(ofmt_ctx);
		return 1;
	}
	printf("avformat_write_header Success! in %d\n",__LINE__);
	
		//------------- read + decode + convert + encode + write --------------------------
	int got_pic;	
	while(encode_video){
		/* select the stream to encode */  
		AVFrame *frame=get_video_frame1(&ost,&video_input,&got_pic);  
		//0. av_compare_ts drops frames past the time limit		1. av_frame_make_writable		2. av_read_frame -- avcodec_decode_video2 -- sws_scale ->->-> yields the decoded ost->frame
		if(!got_pic)  
		{  
			usleep(10000);  //microseconds (millionths of a second)
			continue;  
		}  
		//write_video_frame1(ofmt_ctx, &ost,frame);
		encode_video = !write_video_frame1(ofmt_ctx, &ost,frame);//if get_video_frame1 returned frame==NULL, write_video_frame1 returns 1 -> the while loop stops
	}  
	printf("%d write success\n",__LINE__);	

	if (av_write_trailer(ofmt_ctx) < 0)	{	printf( "Error occurred when av_write_trailer(ofmt_ctx)\n");return 1;	}
#endif	  		
	
	
	//cleanup: release everything at the end
	sws_freeContext(video_input.img_convert_ctx);  
	avcodec_close(video_input.pCodecCtx);  
	av_free(video_input.pFrameYUV);  
	av_free(video_input.pFrame);      
	avformat_close_input(&video_input.v_ifmtCtx);  
#ifdef out_flv
	/* Close each codec. */  
	if (have_video)  close_stream(oc, &video_st); 
	if (!(fmt->flags & AVFMT_NOFILE))  avio_closep(&oc->pb);  /* Close the output file. */ 
	/* free the stream */  
	avformat_free_context(oc);	
#endif
  
#ifdef out_url
	if (have_video)  close_stream(ofmt_ctx, &ost); 
	avformat_free_context(ofmt_ctx);
#endif

	return 0;
}
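On the quality/latency note at the top of this section: the flv muxer's default video codec is FLV1 at 400 kb/s, which largely explains the poor picture. One possible direction, sketched here under the assumption that the FFmpeg build includes libx264 (untested against the original setup):

	/* in add_stream(): pick H.264 instead of the muxer default (FLV1) */
	*codec = avcodec_find_encoder(AV_CODEC_ID_H264);

	/* in the AVMEDIA_TYPE_VIDEO branch of add_stream(): raise the bitrate and
	   ask x264 for low-latency settings (needs #include <libavutil/opt.h>) */
	c->bit_rate = 2000000;	/* instead of 400000 */
	av_opt_set(c->priv_data, "preset", "ultrafast", 0);
	av_opt_set(c->priv_data, "tune", "zerolatency", 0);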
Reposted from blog.csdn.net/qq_42024067/article/details/103751000