FFmpeg Audio/Video Sync: Capturing from a Camera, Encoding, Muxing, and Saving

Copyright notice: please credit the source when quoting: https://blog.csdn.net/quange_style/article/details/90082391

Reading from a camera with FFmpeg, then encoding, muxing, and saving

//-------------------------------------------------------------------------------------------------
Reference 1: https://blog.csdn.net/leixiaohua1020/article/details/39702113
Reference 2: https://blog.csdn.net/li_wen01/article/details/67631687

//-------------------------------------------------------------------------------------------------
Related articles on synchronized audio/video recording
//-------------------------------------------------------------------------------------------------
1. ffmpeg - camera capture and save
2. ffmpeg audio/video sync - camera capture, encode, and mux
3. ffmpeg - audio sine-wave generation, encode, and mux
4. ffmpeg - real-time audio capture and save
5. ffmpeg - real-time audio capture, encode, and mux
6. ffmpeg audio/video sync - real-time audio/video capture, encode, and mux
7. ffmpeg audio/video sync - real-time audio/video capture, encode, and stream
8. ffmpeg audio/video sync - real-time audio/video capture, encode, and stream (optimized)
//---------------------------------------------------------------

System environment:
OS: Lubuntu 16.04
FFmpeg version: ffmpeg version N-93527-g1125277
Camera: 1.3M HD WebCam
VM: Oracle VM VirtualBox 5.2.22

ffmpeg camera-capture commands:

List the available devices: ffmpeg -devices
Record from the camera: ffmpeg -f video4linux2 -s 640x480 -i /dev/video0 -f flv test.flv
Here -f flv names the muxer; the supported muxers are listed in libavformat/muxer_list.c. Once a muxer is specified explicitly, it is used no matter what the file extension is.
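The same lookup is available from C. Here is a minimal sketch, assuming an FFmpeg 4.x build like the one used in this article, that resolves the flv muxer by the same short name -f accepts:

```c
#include <stdio.h>
#include <libavformat/avformat.h>

/* Minimal sketch: resolve a muxer by its short name, the same lookup
 * that -f flv triggers. Valid names are the entries in
 * libavformat/muxer_list.c. */
int main(void)
{
    AVOutputFormat *ofmt = av_guess_format("flv", NULL, NULL);
    if (!ofmt) {
        fprintf(stderr, "muxer 'flv' not found in this build\n");
        return 1;
    }
    printf("muxer: %s (%s), default video codec id: %d\n",
           ofmt->name, ofmt->long_name, ofmt->video_codec);
    return 0;
}
```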

Record from the camera with the H.264 encoder at 25 fps:
ffmpeg -f video4linux2 -r 25 -s 640x480 -i /dev/video0 -f flv -vcodec libx264 test.flv

Here -vcodec selects the encoder. The encoder list lives in libavcodec/codec_list.c, where the corresponding encoder names can be found.
The encoder descriptors live in libavcodec/codec_desc.c; the video-stream codec information printed by ffprobe test.flv comes from there. A descriptor is linked to its encoder through the codec id (e.g. AV_CODEC_ID_H264). One thing to keep in mind: the muxer and the encoder must be compatible with each other.
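To see how the codec id ties an encoder entry to its descriptor, here is a minimal sketch, again assuming an FFmpeg 4.x build:

```c
#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Minimal sketch: resolve an encoder (codec_list.c entry) and its
 * descriptor (codec_desc.c entry) through the shared codec id. */
int main(void)
{
    AVCodec *enc = avcodec_find_encoder(AV_CODEC_ID_H264);
    const AVCodecDescriptor *desc = avcodec_descriptor_get(AV_CODEC_ID_H264);

    printf("encoder: %s\n", enc ? enc->name : "(none built in)");
    if (desc)
        printf("descriptor: %s - %s\n", desc->name, desc->long_name);
    return 0;
}
```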

| Format | Developer | Streaming | Supported video codecs | Supported audio codecs | Typical use |
|--------|-----------|-----------|------------------------|------------------------|-------------|
| AVI  | Microsoft Inc.    | No  | Almost any                         | Almost any                              | BT movie downloads |
| MP4  | MPEG              | Yes | MPEG-2, MPEG-4, H.264, H.263, etc. | AAC, MPEG-1 Layers I/II/III, AC-3, etc. | Online video sites |
| TS   | MPEG              | Yes | MPEG-1, MPEG-2, MPEG-4, H.264      | MPEG-1 Layers I/II/III, AAC             | IPTV, digital TV |
| FLV  | Adobe Inc.        | Yes | Sorenson, VP6, H.264               | MP3, ADPCM, Linear PCM, AAC, etc.       | Online video sites |
| MKV  | CoreCodec Inc.    | Yes | Almost any                         | Almost any                              | Online video sites |
| RMVB | RealNetworks Inc. | Yes | RealVideo 8, 9, 10                 | AAC, Cook Codec, RealAudio Lossless     | BT movie downloads |

The table above is reproduced from a screenshot in https://blog.csdn.net/leixiaohua1020/article/details/18893769.
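The compatibility rule behind the table does not have to be checked by hand. A small sketch, assuming the same FFmpeg 4.x API, that asks a muxer directly whether it can carry a given codec:

```c
#include <stdio.h>
#include <libavformat/avformat.h>

/* Minimal sketch: the "muxer must match the encoder" rule in API form.
 * avformat_query_codec() returns 1 if the codec can be stored in the
 * container, 0 if it cannot, and a negative value if unknown. */
int main(void)
{
    AVOutputFormat *flv = av_guess_format("flv", NULL, NULL);
    if (!flv)
        return 1;

    printf("flv + h264:       %d\n",
           avformat_query_codec(flv, AV_CODEC_ID_H264, FF_COMPLIANCE_NORMAL));
    printf("flv + mpeg2video: %d\n",
           avformat_query_codec(flv, AV_CODEC_ID_MPEG2VIDEO, FF_COMPLIANCE_NORMAL));
    return 0;
}
```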

This article builds on "ffmpeg - camera capture and save", which grabs camera frames and converts them to YUV420 for saving, and combines it with the doc/examples/muxing.c example from the FFmpeg source tree; only the video stream is muxed here.

1. Introduction

FFmpeg ships a library for interacting with multimedia devices: libavdevice. With it you can read data from the multimedia devices of a computer (or of some other device), or output data to a specified multimedia device.
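Before the full program, here is a minimal sketch of the libavdevice entry points the source below relies on: register the device (de)muxers, then open the V4L2 camera just like a regular input:

```c
#include <stdio.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>

/* Minimal sketch (FFmpeg 4.x API): open /dev/video0 through the
 * video4linux2 input device and print what the camera offers. */
int main(void)
{
    avdevice_register_all();

    AVInputFormat *ifmt = av_find_input_format("video4linux2");
    AVFormatContext *ctx = NULL;
    if (avformat_open_input(&ctx, "/dev/video0", ifmt, NULL) != 0) {
        fprintf(stderr, "could not open /dev/video0\n");
        return 1;
    }
    avformat_find_stream_info(ctx, NULL);
    av_dump_format(ctx, 0, "/dev/video0", 0); /* 0 = dump as input */
    avformat_close_input(&ctx);
    return 0;
}
```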

2. Source Code

The simplest libavdevice-based program: read YUV frames from the camera one by one, encode them with H.264, mux them, and save the result as test.mp4:

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>                 /* usleep() */

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>   /* avdevice_register_all() */
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;
    AVCodecContext *enc;

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;

    AVFrame *frame;
    AVFrame *tmp_frame;

    float t, tincr, tincr2;

    struct SwsContext *sws_ctx;
    struct SwrContext *swr_ctx;
} OutputStream;

// the camera input side: demuxer, decoder, and conversion context
typedef struct IntputDev {
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFormatContext *v_ifmtCtx;
    int videoindex;
    struct SwsContext *img_convert_ctx;
    AVPacket *in_packet;
    AVFrame *pFrame, *pFrameYUV;
} IntputDev;

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    /* rescale output packet timestamp values from codec to stream timebase */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;

    /* Write the compressed frame to the media file. */
    log_packet(fmt_ctx, pkt);
    return av_interleaved_write_frame(fmt_ctx, pkt);
}

/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        if ((*codec)->supported_samplerates) {
            c->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    c->sample_rate = 44100;
            }
        }
        c->channel_layout = AV_CH_LAYOUT_STEREO;
        if ((*codec)->channel_layouts) {
            c->channel_layout = (*codec)->channel_layouts[0];
            for (i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    c->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        /* derive the channel count only after the layout has been chosen */
        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width    = 640;
        c->height   = 480;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
        c->time_base       = ost->st->time_base;

        c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt       = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

/**************************************************************/
/* video output */

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    printf("ost->frame alloc success fmt=%d w=%d h=%d\n", c->pix_fmt, c->width, c->height);

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame1(AVFormatContext *oc, OutputStream *ost, AVFrame *frame)
{
    int ret;
    AVCodecContext *c;
    int got_packet = 0;
    AVPacket pkt = { 0 };

    if (frame == NULL)
        return 1;

    c = ost->enc;

    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    printf("--------------video- pkt.pts=%s\n", av_ts2str(pkt.pts));
    printf("----st.num=%d st.den=%d codec.num=%d codec.den=%d---------\n",
           ost->st->time_base.num, ost->st->time_base.den,
           c->time_base.num, c->time_base.den);

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
    } else {
        ret = 0;
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
}

static AVFrame *get_video_frame1(OutputStream *ost, IntputDev *input, int *got_pic)
{
    int ret, got_picture;
    AVCodecContext *c = ost->enc;
    AVFrame *ret_frame = NULL;

    /* stop producing frames once STREAM_DURATION has been reached */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (av_read_frame(input->v_ifmtCtx, input->in_packet) >= 0) {
        if (input->in_packet->stream_index == input->videoindex) {
            ret = avcodec_decode_video2(input->pCodecCtx, input->pFrame, &got_picture, input->in_packet);
            *got_pic = got_picture;

            if (ret < 0) {
                printf("Decode Error.\n");
                av_free_packet(input->in_packet);
                return NULL;
            }
            if (got_picture) {
                sws_scale(input->img_convert_ctx, (const unsigned char* const*)input->pFrame->data,
                          input->pFrame->linesize, 0, input->pCodecCtx->height,
                          ost->frame->data, ost->frame->linesize);
                ost->frame->pts = ost->next_pts++;
                ret_frame = ost->frame;
            }
        }
        av_free_packet(input->in_packet);
    }
    return ret_frame;
}

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program captures video from a camera, encodes it and muxes it\n"
               "into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    for (i = 2; i+1 < argc; i+=2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    //********add camera read***********//
    IntputDev video_input = { 0 };
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFormatContext *v_ifmtCtx;

    //Register Device
    avdevice_register_all();

    v_ifmtCtx = avformat_alloc_context();

    //Linux
    AVInputFormat *ifmt = av_find_input_format("video4linux2");
    if (avformat_open_input(&v_ifmtCtx, "/dev/video0", ifmt, NULL) != 0) {
        printf("Couldn't open input stream /dev/video0\n");
        return -1;
    }

    if (avformat_find_stream_info(v_ifmtCtx, NULL) < 0) {
        printf("Couldn't find stream information.\n");
        return -1;
    }

    int videoindex = -1;
    for (i = 0; i < v_ifmtCtx->nb_streams; i++)
        if (v_ifmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }
    if (videoindex == -1) {
        printf("Couldn't find a video stream.\n");
        return -1;
    }

    pCodecCtx = v_ifmtCtx->streams[videoindex]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        printf("Codec not found.\n");
        return -1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec.\n");
        return -1;
    }

    AVFrame *pFrame, *pFrameYUV;
    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    unsigned char *out_buffer = (unsigned char *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

    printf("camera width=%d height=%d \n", pCodecCtx->width, pCodecCtx->height);

    struct SwsContext *img_convert_ctx;
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
                                     SWS_BICUBIC, NULL, NULL, NULL);
    AVPacket *in_packet = (AVPacket *)av_malloc(sizeof(AVPacket));

    video_input.img_convert_ctx = img_convert_ctx;
    video_input.in_packet = in_packet;
    video_input.pCodecCtx = pCodecCtx;
    video_input.pCodec = pCodec;
    video_input.v_ifmtCtx = v_ifmtCtx;
    video_input.videoindex = videoindex;
    video_input.pFrame = pFrame;
    video_input.pFrameYUV = pFrameYUV;

    //******************************//

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    printf("fmt->video_codec = %d\n", fmt->video_codec);

    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    int got_pic = 0;

    while (encode_video) {
        /* grab a decoded camera frame, then encode and mux it */
        AVFrame *frame = get_video_frame1(&video_st, &video_input, &got_pic);
        if (!got_pic) {
            usleep(10000);
            continue;
        }
        encode_video = !write_video_frame1(oc, &video_st, frame);
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    sws_freeContext(video_input.img_convert_ctx);

    avcodec_close(video_input.pCodecCtx);
    av_free(video_input.pFrameYUV);
    av_free(video_input.pFrame);
    avformat_close_input(&video_input.v_ifmtCtx);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}

3. Verification

3.1 Compilation

#!/bin/sh
export PKG_CONFIG_PATH=/home/quange/ffmpeg_build/lib/pkgconfig/:$PKG_CONFIG_PATH
gcc ffmpeg_get_camera_muxing.c -g -o ffmpeg_get_camera_muxing.out -lSDLmain -lSDL `pkg-config "libavcodec" --cflags --libs` `pkg-config "libavformat" --cflags --libs` `pkg-config "libavutil" --cflags --libs` `pkg-config "libswscale" --cflags --libs` `pkg-config "libavdevice" --cflags --libs`
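The resulting binary takes the output filename as its first argument, and capture stops on its own once STREAM_DURATION (10 seconds) of video has been encoded, e.g. ./ffmpeg_get_camera_muxing.out test.mp4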

3.2 Testing the Result

Open the generated test.mp4 with VLC to verify that the recording plays back correctly.
