FFmpeg: reading H264 frames from a USB camera and streaming over RTMP

After several days of experimenting, I finally got a working pipeline: FFmpeg reads H264 frames from a USB camera and pushes them out over RTMP. The FFmpeg version used is 4.0.2. The examples I found online were either plain command lines or programs that read YUV data and then encode it, so I had to work this out on my own.
I will not cover building the FFmpeg sources on Ubuntu 16.04; there are plenty of articles about that. What I do want to point out is that FFmpeg's v4l2 wrapper did not seem to let me pick V4L2_PIX_FMT_H264 from the camera's list of output formats; I only ever got the default output, and I have not yet figured out why.
So instead I wrote the V4L2 capture operations myself, stored the frames in memory, had FFmpeg read the data from memory through custom I/O, and finally pushed the stream over RTMP.
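For comparison, the pure-libavdevice route that would not select the H264 format for me looks roughly like this. This is only a sketch: the device path and the 1280x720@30 parameters are placeholders, and "input_format" is the v4l2 demuxer option corresponding to "ffmpeg -f v4l2 -input_format h264".

#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static AVFormatContext *open_v4l2_h264(const char *dev)
{
	AVFormatContext *ctx = NULL;
	AVDictionary *opts = NULL;
	AVInputFormat *ifmt;

	avdevice_register_all();
	ifmt = av_find_input_format("video4linux2");
	/* ask the v4l2 demuxer for the camera's H264 output */
	av_dict_set(&opts, "input_format", "h264", 0);
	av_dict_set(&opts, "video_size", "1280x720", 0);
	av_dict_set(&opts, "framerate", "30", 0);
	if (avformat_open_input(&ctx, dev, ifmt, &opts) < 0)
		ctx = NULL;	/* open failed; FFmpeg has freed the context */
	av_dict_free(&opts);
	return ctx;
}

If this did hand back H264 frames on your camera, the whole V4L2 layer below would be unnecessary; on my device it kept falling back to the default format, hence the manual route.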
Here I owe a lot to two blog posts by Lei Xiaohua ("雷神"):

Without those two posts I would have spent much longer fumbling around. Sadly he is no longer with us, but his contributions are still benefiting all of us.

Code

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <fcntl.h>
#include <malloc.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <libavutil/time.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>

#define DEV_TYPE		"video4linux2"
#define DEV_NAME		"/dev/video1"
#define MAX_CHANNEL 	(4)
#define AV_IO_BUF_SIZE	(96*1024)
#define CLEAR(x) 		memset(&(x), 0, sizeof(x))

struct buffer {
	void   *start;
	size_t length;
};

struct usbcamera_node
{
	int channel;
	char id[32];
	int usb_port;
	//V4L2
	char devname[32];
	int fd;
	struct v4l2_format fmt;
	struct v4l2_streamparm parm;
	struct v4l2_requestbuffers req;
	struct buffer *buffers;
	int n_buffers;
	int poll_index[MAX_CHANNEL];

};

struct usbcamera_node usbcamra;
struct pollfd usbcamra_poll_fd[MAX_CHANNEL];
nfds_t usbcamra_poll_fd_num = 0;
unsigned int frame_len = 0;
unsigned int frame_cnt = 0;

int avError(int errNum);

static int xioctl(int fh, int request, void *arg)
{
	int r;
	do
	{
		r = ioctl(fh, request, arg);
	} while (-1 == r && EINTR == errno);

	return r;
}

static int video_init(struct usbcamera_node *camera_node)
{
	struct v4l2_capability cap;
	struct v4l2_fmtdesc fmtdesc;
	int ret = 0;

	// open the video device (non-blocking)
	camera_node->fd = open(camera_node->devname, O_RDWR | O_NONBLOCK, 0);

	if (-1 == camera_node->fd)
	{
		fprintf(stderr, "Cannot open '%s': %d, %s\n", camera_node->devname, errno, strerror(errno));
		return -1;
	}

	// query the device capabilities (VIDIOC_QUERYCAP)
	if (-1 == xioctl(camera_node->fd, VIDIOC_QUERYCAP, &cap))
	{
		fprintf(stderr, "%s is no V4L2 device\n", camera_node->devname);
		return -1;
	}

	// verify capture and streaming capabilities
	if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE))
	{
		fprintf(stderr, "%s is no video capture device\n", camera_node->devname);
		return -1;
	}

	if (!(cap.capabilities & V4L2_CAP_STREAMING))
	{
		fprintf(stderr, "%s does not support streaming i/o\n", camera_node->devname);
		return -1;
	}

	printf("\nVIDOOC_QUERYCAP\n");
	printf("the camera driver is: %s\n", cap.driver);
	printf("the camera card is: %s\n", cap.card);
	printf("the camera bus info is: %s\n", cap.bus_info);
	printf("the version is: %d\n", cap.version);
	printf("the capabilities is: 0x%x\n", cap.capabilities);
	printf("the device_caps is: 0x%x\n", cap.device_caps);

	fmtdesc.index = 0;                          // format index
	fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; // buffer type
	while(xioctl(camera_node->fd, VIDIOC_ENUM_FMT, &fmtdesc) != -1)
	{
		printf("VIDIOC_ENUM_FMT success! fmtdesc.index:%d, fmtdesc.type:%d, fmtdesc.flags:%d, "
			   "fmtdesc.description:%s, fmtdesc.pixelformat:%d\n",
			   fmtdesc.index, fmtdesc.type, fmtdesc.flags, fmtdesc.description, fmtdesc.pixelformat);
		fmtdesc.index ++;
	}

	if (-1 == xioctl(camera_node->fd, VIDIOC_S_FMT, &camera_node->fmt))
	{
		fprintf(stderr, "%s set fmt failed\n", camera_node->devname);
		return -1;
	}

	printf("VIDIOC_S_FMT success! width:%d, height:%d, pixelformat:%x, field:%d, bytesperline:%d, "
		   "sizeimage:%d, colorspace:%d, priv:%d, flags:%x, ycbcr_enc:%d, quantization:%d, xfer_func:%d\n",
		   camera_node->fmt.fmt.pix.width, camera_node->fmt.fmt.pix.height, camera_node->fmt.fmt.pix.pixelformat,
		   camera_node->fmt.fmt.pix.field, camera_node->fmt.fmt.pix.bytesperline, camera_node->fmt.fmt.pix.sizeimage,
		   camera_node->fmt.fmt.pix.colorspace, camera_node->fmt.fmt.pix.priv, camera_node->fmt.fmt.pix.flags,
		   camera_node->fmt.fmt.pix.ycbcr_enc, camera_node->fmt.fmt.pix.quantization, camera_node->fmt.fmt.pix.xfer_func);

	struct v4l2_streamparm parm = {0};
	parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	xioctl(camera_node->fd, VIDIOC_G_PARM, &parm);
	parm.parm.capture.timeperframe.numerator = 1;
	parm.parm.capture.timeperframe.denominator = camera_node->parm.parm.capture.timeperframe.denominator;
	ret = xioctl(camera_node->fd, VIDIOC_S_PARM, &parm);
	if(ret !=0 )
	{
		printf("line:%d parm set error, errno:%d, str:%s\n", __LINE__, errno, strerror(errno));
		return -1;
	}
	printf("fd %d ret %d set Frame rate %.3f fps\n", camera_node->fd, ret,
		   1.0 * parm.parm.capture.timeperframe.denominator / parm.parm.capture.timeperframe.numerator);

	// request V4L2 buffers from the driver for MMAP
	if (-1 == xioctl(camera_node->fd, VIDIOC_REQBUFS, &camera_node->req))
	{
		if (EINVAL == errno)
		{
			fprintf(stderr, "%s does not support memory mapping\n", "USBCAMERA");
			return -1;
		}
		else
		{
			return -1;
		}
	}

	// map the buffers into user space
	for (camera_node->n_buffers = 0; camera_node->n_buffers < camera_node->req.count; ++camera_node->n_buffers)
	{
		struct v4l2_buffer buf;

		CLEAR(buf);

		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = camera_node->n_buffers;

		if (-1 == xioctl(camera_node->fd, VIDIOC_QUERYBUF, &buf))
		{
			ret = -1;
			break;
		}

		camera_node->buffers[camera_node->n_buffers].length = buf.length;
		camera_node->buffers[camera_node->n_buffers].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE ,MAP_SHARED, camera_node->fd, buf.m.offset);
		printf("mmap buffer index:%d buf %p length %d\n", camera_node->n_buffers, camera_node->buffers[camera_node->n_buffers].start, buf.length);

		if (MAP_FAILED == camera_node->buffers[camera_node->n_buffers].start)
		{
			ret = -1;
			break;
		}
	}
	if((ret == -1) && (camera_node->n_buffers != 0))
	{
		int idx;
		for(idx = 0; idx < camera_node->n_buffers; idx++)
		{
			munmap(camera_node->buffers[idx].start, camera_node->buffers[idx].length);
			printf("munmap buffer index:%d buf %p length %ld\n",
				   idx, camera_node->buffers[idx].start,
				   camera_node->buffers[idx].length);
		}
		return -1;
	}

	return 0;
}

static int start_capturing(struct usbcamera_node *camera_node)
{
	unsigned int i;
	enum v4l2_buf_type type;
	int n_buffers = 0;

	n_buffers = camera_node->n_buffers;
	printf("start_capturing fd %d n_buffers %d\n", camera_node->fd, n_buffers);
	for (i = 0; i < n_buffers; ++i)
	{
		struct v4l2_buffer buf;

		CLEAR(buf);
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;

		if (-1 == xioctl(camera_node->fd, VIDIOC_QBUF, &buf))
		{
			printf("fd %d VIDIOC_QBUF faild\n", camera_node->fd);
			return -1;
		}
	}
	printf("fd %d VIDIOC_QBUF OK!\n", camera_node->fd);

	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (-1 == xioctl(camera_node->fd, VIDIOC_STREAMON, &type))
	{
		printf("fd %d VIDIOC_STREAMON faild\n", camera_node->fd);
		return -1;
	}
	printf("fd %d VIDIOC_STREAMON Ok!\n", camera_node->fd);
	return 0;
}

static int stop_capturing(struct usbcamera_node *camera_node)
{
	enum v4l2_buf_type type;

	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (-1 == xioctl(camera_node->fd, VIDIOC_STREAMOFF, &type))
	{
		printf("fd %d VIDIOC_STREAMOFF faild\n", camera_node->fd);
		return -1;
	}
	printf("fd %d VIDIOC_STREAMOFF Ok!\n", camera_node->fd);
	return 0;
}

static int read_frame(struct usbcamera_node *camera_node, unsigned char *pbuf, unsigned int ch, struct timeval *tvl)
{
	struct v4l2_buffer buf;
	int count = 0;
	int n_buffers = 0;

	n_buffers = camera_node->n_buffers;
	CLEAR(buf);
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	if (-1 == xioctl(camera_node->fd, VIDIOC_DQBUF, &buf))
	{
		switch (errno) {
		case EAGAIN:
			return 0;
		case EIO:
		/* Could ignore EIO, see spec. */
		/* fall through */
		default:
			{
				printf("VIDIOC_DQBUF faild\n");
				return -1;
			}
		}
	}

	if(buf.index >= n_buffers)
	{
		printf("buf.index %d out of range, n_buffers %d\n", buf.index, n_buffers);
		return -1;
	}

	/* copy the whole encoded frame out of the mmap'ed kernel buffer;
	   the caller must supply a buffer large enough for one frame */
	memcpy(pbuf, camera_node->buffers[buf.index].start, buf.bytesused);
	tvl->tv_sec = buf.timestamp.tv_sec;
	tvl->tv_usec = buf.timestamp.tv_usec;
	count = buf.bytesused;

	if (-1 == xioctl(camera_node->fd, VIDIOC_QBUF, &buf))
	{
		printf("VIDIOC_QBUF faild\n");
	}

	return count;
}

void free_camra_resource(struct usbcamera_node *camera_node)
{
	int cnt = 0;
	for(cnt = 0; cnt < camera_node->n_buffers; cnt++)
	{
		munmap(camera_node->buffers[cnt].start, camera_node->buffers[cnt].length);
		printf("munmap buffer index:%d buf %p length %ld\n",
			   cnt, camera_node->buffers[cnt].start,
			   camera_node->buffers[cnt].length);
	}
}


int read_buffer(void *opaque, uint8_t *pbuf, int buf_size)
{
	struct timeval tvl;
	if(poll(usbcamra_poll_fd, usbcamra_poll_fd_num, -1) == -1)
	{
		printf("usbcamra poll failed !!!!!!!!!!!!!\n");
		return AVERROR_EXTERNAL;
	}

	if((usbcamra_poll_fd[0].revents & POLLERR) == POLLERR)
	{
		printf("usbcamra_poll_fd[0].revents 0x%x\n", usbcamra_poll_fd[0].revents);
		return AVERROR_EXTERNAL;
	}

	if(usbcamra_poll_fd[0].revents & POLLIN)
	{
		int len = read_frame(&usbcamra, pbuf, 0, &tvl);
		if(len < 0)
			return AVERROR_EXTERNAL;
		frame_len = (unsigned int)len;
		printf("frame_cnt:%d, frame_len:%d, tvl.tv_sec:%ld ", frame_cnt, frame_len, tvl.tv_sec);
		printf("%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x "
			   "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x \n",
			   pbuf[0],pbuf[1],pbuf[2],pbuf[3],pbuf[4],pbuf[5],pbuf[6],pbuf[7],pbuf[8],pbuf[9],pbuf[10],pbuf[11],
			   pbuf[12],pbuf[13],pbuf[14],pbuf[15],pbuf[16],pbuf[17],pbuf[18],pbuf[19],pbuf[20],pbuf[21],pbuf[22],
			   pbuf[23],pbuf[24],pbuf[25],pbuf[26],pbuf[27],pbuf[28],pbuf[29],pbuf[30],pbuf[31]);
	}
	frame_cnt++;
	usbcamra_poll_fd[0].revents = 0;

	/* note: read_frame() has already copied the whole frame into pbuf,
	   so a frame larger than AV_IO_BUF_SIZE would overflow the AVIO buffer */
	if(frame_len > (unsigned int)buf_size)
	{
		printf("frame_len %u is bigger than buf_size %d\n", frame_len, buf_size);
		return buf_size;
	}
	return (int)frame_len;

}

//list the camera's output formats: ffmpeg -f v4l2 -list_formats all -i /dev/video0
//usage: ./ffmpeg_usb_rtmp /dev/video0 1280 720 30
int main(int argc, char* argv[])
{
    int videoindex = -1;
    unsigned int frame_rate = 0;
    //av_register_all and avformat_network_init must be called before any other API
    //they register all muxers/demuxers (flv mp4 mp3 mov); encoders/decoders are separate
    av_register_all();
    avformat_network_init();

    if(argc != 5)
	{
		usbcamra.fmt.fmt.pix.width = 1280;
		usbcamra.fmt.fmt.pix.height = 720;
		frame_rate = 30;
	}
	else
	{
		usbcamra.fmt.fmt.pix.width = atoi(argv[2]);
		usbcamra.fmt.fmt.pix.height = atoi(argv[3]);
		frame_rate = atoi(argv[4]);
	}

	if(argc < 2)
	{
		printf("usage: %s /dev/videoX [width height fps]\n", argv[0]);
		return -1;
	}
	snprintf(usbcamra.devname, sizeof(usbcamra.devname), "%s", argv[1]);
	printf("width:%d, height:%d, dev:%s\n", usbcamra.fmt.fmt.pix.width, usbcamra.fmt.fmt.pix.height, usbcamra.devname);

	usbcamra.fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	usbcamra.fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
	usbcamra.fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;

	CLEAR(usbcamra.parm);
	usbcamra.parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	usbcamra.parm.parm.capture.timeperframe.numerator = 1;
	usbcamra.parm.parm.capture.timeperframe.denominator = frame_rate;
	CLEAR(usbcamra.req);
	usbcamra.req.count = 16;
	usbcamra.req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	usbcamra.req.memory = V4L2_MEMORY_MMAP;
	usbcamra.buffers = calloc(usbcamra.req.count, sizeof(struct buffer));
	if(!usbcamra.buffers)
	{
		fprintf(stderr, "calloc faild, errno:%d, str:%s\n", errno, strerror(errno));
		return -1;
	}

	video_init(&usbcamra);
	start_capturing(&usbcamra);
	usbcamra_poll_fd[0].fd = usbcamra.fd;
	usbcamra_poll_fd[0].events = POLLIN;
	usbcamra_poll_fd_num = 1;

    //the output (push) URL
    const char *outUrl = "rtmp://192.168.1.102:1935/live";
    
	//AVFormatContext **ps: the input context; it holds all format state and all I/O (file I/O for files, network I/O for streams)
	AVFormatContext *ifmt_ctx = NULL;

	ifmt_ctx = avformat_alloc_context();
	unsigned char* inbuffer=NULL;
	inbuffer = (unsigned char*)av_malloc(AV_IO_BUF_SIZE);
	if(inbuffer == NULL)
	{
		avformat_free_context(ifmt_ctx);
		printf("line:%d av_malloc failed!\n", __LINE__);
		return -1;
	}
	AVIOContext *avio_in = avio_alloc_context(inbuffer, AV_IO_BUF_SIZE, 0, NULL, read_buffer, NULL, NULL);
	if(avio_in == NULL)
	{
		avformat_free_context(ifmt_ctx);
		av_free((void*)inbuffer);
		printf("line:%d avio_alloc_context failed!\n", __LINE__);
		return -1;
	}

	ifmt_ctx->pb = avio_in;
	ifmt_ctx->flags = AVFMT_FLAG_CUSTOM_IO;

	//open the input and probe the header
	int ret = avformat_open_input(&ifmt_ctx, NULL, NULL, NULL);
	if (ret < 0)
	{
		avformat_free_context(ifmt_ctx);
		av_free((void*)inbuffer);
		avio_context_free(&avio_in);
		return avError(ret);
	}
	printf("avformat_open_input success!\n");

	ret = avformat_find_stream_info(ifmt_ctx, NULL);
	if (ret != 0)
	{
		avformat_free_context(ifmt_ctx);
		av_free((void*)inbuffer);
		avio_context_free(&avio_in);
		return avError(ret);
	}
	//dump the input stream information
	//(stream index 0; URL is NULL since the input is custom I/O; last argument 0 marks it as input)
	av_dump_format(ifmt_ctx, 0, NULL, 0);

	AVFormatContext * ofmt_ctx = NULL;
	//for file output the "flv" muxer name could be omitted and guessed from the file name; for a stream it must be given
	//create the output context
	ret = avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", outUrl);
	if (ret < 0)
	{
		avformat_free_context(ifmt_ctx);
		av_free((void*)inbuffer);
		avio_context_free(&avio_in);
		avformat_free_context(ofmt_ctx);
		return avError(ret);
	}
	printf("avformat_alloc_output_context2 success!\n");
	printf("ifmt_ctx->nb_streams:%d\n", ifmt_ctx->nb_streams);
	//find the video stream among the input streams and remember its index
	unsigned int i;
	for (i = 0; i < ifmt_ctx->nb_streams; i++)
	{
		//the input stream
		AVStream *in_stream = ifmt_ctx->streams[i];
		if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			videoindex = i;
		}
		//add a stream to the output context (initialize a stream container)
		AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
		if (!out_stream)
		{
			printf("未能成功添加音视频流\n");
			ret = AVERROR_UNKNOWN;
		}

		//copy the codec parameters of the input stream to the output stream
		//ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
		ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
		//ret = avcodec_parameters_from_context(out_stream->codecpar, in_stream->codec);
		//ret = avcodec_parameters_to_context(out_stream->codec, in_stream->codecpar);
		if (ret < 0)
		{
			printf("copy 编解码器上下文失败\n");
		}
		out_stream->codecpar->codec_tag = 0;

		out_stream->codec->codec_tag = 0;
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
			out_stream->codec->flags = out_stream->codec->flags | AV_CODEC_FLAG_GLOBAL_HEADER;
		}
	}

	printf("videoindex:%d\n", videoindex);
	av_dump_format(ofmt_ctx, 0, outUrl, 1);

	//open the output I/O
	ret = avio_open(&ofmt_ctx->pb, outUrl, AVIO_FLAG_WRITE);
	if (ret < 0)
	{
		avformat_free_context(ifmt_ctx);
		av_free((void*)inbuffer);
		avio_context_free(&avio_in);
		avformat_free_context(ofmt_ctx);
		return avError(ret);
	}

	//write the stream header
	ret = avformat_write_header(ofmt_ctx, 0);
	if (ret < 0)
	{
		avformat_free_context(ifmt_ctx);
		av_free((void*)inbuffer);
		avio_context_free(&avio_in);
		avformat_free_context(ofmt_ctx);
		return avError(ret);
	}

	printf("avformat_write_header Success!\n");
	//push the packets one by one
	//int64_t pts  [pts*(num/den) = presentation time in seconds]
	//int64_t dts  decoding timestamp [P frame (delta from the previous frame), I frame (key frame, complete picture), B frame (deltas from the previous and next frames)]; B frames improve compression
	AVPacket pkt;
	//current wall-clock timestamp in microseconds
	long long start_time = av_gettime();
	long long frame_index = 0;
	while (1)
	{
		//input and output streams
		AVStream *in_stream, *out_stream;
		//read one packet of raw (not yet decoded) data
		ret = av_read_frame(ifmt_ctx, &pkt);
		if (ret < 0) break;

		//PTS (Presentation Time Stamp): when the frame should be displayed
		//DTS (Decoding Time Stamp): when the frame should be decoded
		//no presentation time yet (e.g. raw H.264 that has not been decoded)
		if (pkt.pts == AV_NOPTS_VALUE)
		{
			//AVRational time_base: the time base; it converts PTS/DTS into real time
			AVRational time_base1 = ifmt_ctx->streams[videoindex]->time_base;
			//duration of one frame, in AV_TIME_BASE units
			int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(ifmt_ctx->streams[videoindex]->r_frame_rate);
			//synthesize the timestamps
			pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
			pkt.dts = pkt.pts;
			pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
		}

		if (pkt.stream_index == videoindex)
		{
			AVRational time_base = ifmt_ctx->streams[videoindex]->time_base;
			AVRational time_base_q = { 1,AV_TIME_BASE };
			//playback time of this packet on the stream's timeline
			int64_t pts_time = av_rescale_q(pkt.dts, time_base, time_base_q);
			//wall-clock time elapsed since streaming started
			int64_t now_time = av_gettime() - start_time;

			AVRational avr = ifmt_ctx->streams[videoindex]->time_base;
			printf("avr.num:%d, avr.den:%d, pkt.dts:%ld, pkt.pts:%ld, pts_time:%ld\n",
					avr.num,    avr.den,    pkt.dts,     pkt.pts,     pts_time);
			if (pts_time > now_time)
			{
				//sleep so the sending pace matches the stream's own timeline
				printf("pts_time:%ld, now_time:%ld\n", pts_time, now_time);
				av_usleep((unsigned int)(pts_time - now_time));
			}
		}

		in_stream = ifmt_ctx->streams[pkt.stream_index];
		out_stream = ofmt_ctx->streams[pkt.stream_index];

		//after the delay, rescale the timestamps into the output stream's time base
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.duration = (int)av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		//byte position in the stream; -1 means unknown and lets the muxer detect it
		pkt.pos = -1;
		if (pkt.stream_index == videoindex) {
			printf("Send %8lld video frames to output URL\n", frame_index);
			frame_index++;
		}

		ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
		if (ret < 0)
		{
			printf("发送数据包出错\n");
			break;
		}
		av_free_packet(&pkt);
	}

	stop_capturing(&usbcamra);
	free_camra_resource(&usbcamra);
	avformat_free_context(ifmt_ctx);
	av_free((void*)inbuffer);
	avio_context_free(&avio_in);
	avformat_free_context(ofmt_ctx);
	return 0;
}

int avError(int errNum) {
    char buf[1024];
    av_strerror(errNum, buf, sizeof(buf));
    printf("failed!\n");
    return -1;
}

How to run: ./ffmpeg_usb_rtmp /dev/video0 1280 720 30 1500000 (note that with the trailing bitrate argument argc becomes 6, so the argc != 5 branch falls back to the built-in 1280x720@30 defaults; the bitrate value is never parsed).
Some USB cameras accept commands to set the H264 bitrate, but that is not implemented here: the commands differ from vendor to vendor, and V4L2 does not seem to provide one unified command for setting the encoder bitrate.
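That said, some devices do expose the standard V4L2_CID_MPEG_VIDEO_BITRATE control, so a hedged attempt is possible. This is only a sketch, and many UVC cameras will simply reject it, which would match the observation above; try_set_bitrate is a hypothetical helper, not part of the program.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Try the standard V4L2 bitrate control; support is entirely
 * driver-dependent, and unsupported devices typically fail with
 * EINVAL or ENOTTY. */
static int try_set_bitrate(int fd, int bps)
{
	struct v4l2_control ctrl;
	ctrl.id    = V4L2_CID_MPEG_VIDEO_BITRATE;
	ctrl.value = bps;
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) == -1) {
		printf("bitrate control not supported: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}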
The code above is only a proof of concept for reading H264 frames from a USB camera and pushing them over RTMP. It reads the data with poll() directly inside read_buffer(), and with the frame rate set to 30 there are dropped frames and visual artifacts during playback. The cause is the single thread: while an RTMP packet is being sent, nothing is draining the camera, so frames get lost; a sketch of the threaded fix follows below.
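A minimal sketch of that fix, assuming a dedicated capture thread and a small in-memory frame queue. It reuses read_frame(), usbcamra and usbcamra_poll_fd from the listing above; the queue depth, the per-frame size cap and the drop-oldest policy are illustrative choices, and capture_thread/read_buffer_threaded are hypothetical names.

#include <poll.h>
#include <pthread.h>
#include <string.h>

#define QUEUE_DEPTH 8
#define MAX_FRAME   (512 * 1024)

static struct frame_q {
	unsigned char   data[QUEUE_DEPTH][MAX_FRAME];
	unsigned int    len[QUEUE_DEPTH];
	int             head, tail, count;
	pthread_mutex_t lock;
	pthread_cond_t  not_empty;
} q = { .lock = PTHREAD_MUTEX_INITIALIZER, .not_empty = PTHREAD_COND_INITIALIZER };

/* Producer: keeps draining V4L2 even while the main thread blocks on RTMP writes. */
static void *capture_thread(void *arg)
{
	struct timeval tvl;
	(void)arg;
	for (;;) {
		/* the same poll + read_frame() pair that read_buffer() uses */
		if (poll(usbcamra_poll_fd, 1, -1) <= 0)
			continue;
		if (!(usbcamra_poll_fd[0].revents & POLLIN))
			continue;
		pthread_mutex_lock(&q.lock);
		if (q.count == QUEUE_DEPTH) {	/* queue full: drop the oldest frame */
			q.head = (q.head + 1) % QUEUE_DEPTH;
			q.count--;
		}
		int n = read_frame(&usbcamra, q.data[q.tail], 0, &tvl);
		if (n > 0) {
			q.len[q.tail] = (unsigned int)n;
			q.tail = (q.tail + 1) % QUEUE_DEPTH;
			q.count++;
			pthread_cond_signal(&q.not_empty);
		}
		pthread_mutex_unlock(&q.lock);
	}
	return NULL;
}

/* Consumer: read_buffer() would then pop frames from the queue instead of touching V4L2. */
static int read_buffer_threaded(void *opaque, uint8_t *pbuf, int buf_size)
{
	pthread_mutex_lock(&q.lock);
	while (q.count == 0)
		pthread_cond_wait(&q.not_empty, &q.lock);
	unsigned int n = q.len[q.head];
	if (n > (unsigned int)buf_size)
		n = (unsigned int)buf_size;	/* truncate oversized frames */
	memcpy(pbuf, q.data[q.head], n);
	q.head = (q.head + 1) % QUEUE_DEPTH;
	q.count--;
	pthread_mutex_unlock(&q.lock);
	return (int)n;
}

The thread would be started with pthread_create() right after start_capturing(), and read_buffer_threaded passed to avio_alloc_context() in place of read_buffer; the makefile already links -lpthread.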
Audio capture and muxing before pushing is also still missing; that will come later. Once everything is verified, the code still has to be ported to the i.MX6 platform.

makefile

TARGET		 = ffmpeg_usb_rtmp
LIB_PATH 	 = /usr/local/lib/

FFMPEG_LIBS = -lavutil -lavdevice -lavformat -lavcodec -lswresample -lavfilter -lswscale 
SDL_LIBS	= -lSDL2
EXTRA_LIBS  = -lz -lm -lpthread -lstdc++ -lm -lrt -lpcre
ALL_LIBS	= $(EXTRA_LIBS) $(SDL_LIBS) $(FFMPEG_LIBS)  
 
COMPILE_OPTS = -v -g -Wall -Wno-deprecated-declarations 

C_COMPILER   = gcc
C_FLAGS 	 = $(CFLAGS) $(COMPILE_OPTS) 
LD_FLAGS 	 = -L$(LIB_PATH) $(LDFLAGS)

SRC = ffmpeg_usb_rtmp.c

ALL:
	$(C_COMPILER) $(C_FLAGS) $(LD_FLAGS) $(SRC) -o $(TARGET) $(ALL_LIBS) 
clean:
	rm -rf $(TARGET) *.o *.mp4 *.wav *.h264 *.avi *.flv 

There is still a long way to go in learning FFmpeg!

Reposted from blog.csdn.net/zhaoyun_zzz/article/details/86544875