Because this example composites two video streams, the setup differs slightly from the simpler case of drawing text onto a single video; the target video is produced by overlaying one input onto the other.
1. Initializing the filter graph:
(1) Call avfilter_graph_alloc() to create the filter graph object.
(2) Call avfilter_graph_parse2() to add the filter description to the graph; this creates one AVFilterInOut input and one AVFilterInOut output.
(3) Call avfilter_graph_create_filter() to create the buffer source filter for the media-stream input.
(4) Call avfilter_graph_create_filter() to create the buffer source filter for the picture input.
(5) Call avfilter_graph_create_filter() to create the buffer sink (output) filter.
(6) Call avfilter_link() to connect the input endpoints to the output endpoint.
(7) Call avfilter_graph_config() to validate and configure the filter graph.
2. Using the filter graph:
(1) Use av_buffersrc_add_frame_flags() to push decoded frames into the filter graph.
(2) Use av_buffersink_get_frame() to pull the processed frames back out of the filter graph.
3. Example code:
// Overlay a still picture onto every frame of a video stream and write the
// result to a new media file.
//
// Pipeline: open both inputs -> init decoders -> init the H.264 encoder with
// the stream's dimensions -> build and configure the overlay filter graph ->
// decode the picture once -> for each decoded video frame, push the frame and
// the picture into the filter graph, pull the composited frame from the sink,
// encode it and mux it into the output. Output is limited to roughly
// 10 seconds of wall-clock time.
//
// Parameters:
//   fileInputMediaStream - URL or path of the input video (local file or
//                          network stream).
//   pictureFileName      - path of the picture to overlay.
//   fileOutput           - path of the output media file.
// Returns 0 in every case; failures merely sleep briefly and bail out.
int AddPictureToVideoStream(string fileInputMediaStream, string pictureFileName, string fileOutput)
{
    // Initialize the libraries and register the codecs.
    Init();

    // Open the media input (local file or network stream URL).
    if (OpenMediaInputFile(&g_inputMediaStreamContext, (char *)fileInputMediaStream.c_str()) < 0)
    {
        this_thread::sleep_for(chrono::seconds(10));
        return 0;
    }
    // Open the picture input.
    if (OpenMediaInputFile(&g_inputPictureContext, (char *)pictureFileName.c_str()) < 0)
    {
        this_thread::sleep_for(chrono::seconds(10));
        return 0;
    }
    // Open the decoders for both the media stream and the picture.
    int ret = InitDecodeCodec();
    if (ret < 0)
    {
        this_thread::sleep_for(chrono::seconds(10));
        return 0;
    }
    // Initialize the H.264 video encoder with the source stream's dimensions.
    ret = InitVideoEncoderCodec(g_decoderMediaStreamContext->width, g_decoderMediaStreamContext->height);
    if (ret < 0)
    {
        this_thread::sleep_for(chrono::seconds(10));
        return 0;
    }
    // Build the overlay filter graph.
    InitFilter(g_outputEncContext);
    // Check validity and configure all the links and formats in the graph.
    ret = avfilter_graph_config(g_filter_graph, NULL);
    if (ret < 0)
    {
        return 0;
    }
    // Open the output media file.
    if (OpenOutputMediaFile((char *)fileOutput.c_str()) < 0)
    {
        this_thread::sleep_for(chrono::seconds(10));
        return 0;
    }

    auto pSrcMediaStreamFrame = av_frame_alloc();
    auto pSrcPictureFrame = av_frame_alloc();
    auto filterFrame = av_frame_alloc();
    auto pInputMediaStreamFrame = av_frame_alloc();
    // NOTE(review): the original also allocated a pInputPictureFrame that was
    // never used and never freed; that allocation has been removed.

    int got_output = 0;
    int64_t firstPacketTime = 0;
    int64_t outLastTime = av_gettime();

    // The picture only needs to be decoded once; the same decoded frame is
    // re-pushed into the filter graph for every video frame, with its pts
    // updated to stay in sync with the video timeline.
    auto packet_picture = ReadPacketFromPictureInputStream();
    DecodePicturePacket(packet_picture.get(), pSrcPictureFrame);
    firstPacketTime = av_gettime();

    while (true)
    {
        outLastTime = av_gettime();
        auto packet = ReadPacketFromInputMediaStream();
        if (!packet)
            break;

        if (packet->stream_index == 0 && DecodeMediaStreamPacket(packet.get(), pSrcMediaStreamFrame))
        {
            // Create a new reference to the decoded frame so the filter graph
            // can consume it independently of the decoder's buffers.
            av_frame_ref(pInputMediaStreamFrame, pSrcMediaStreamFrame);
            // Push one decoded video frame into its buffer source.
            if (av_buffersrc_add_frame_flags(g_buffersrcMediaStreamContext, pInputMediaStreamFrame, AV_BUFFERSRC_FLAG_PUSH) >= 0)
            {
                // Keep the overlay picture aligned with the video timeline.
                pSrcPictureFrame->pts = pSrcMediaStreamFrame->pts;
                // Push the picture frame into its buffer source.
                if (av_buffersrc_add_frame_flags(g_buffersrcPictureContext, pSrcPictureFrame, AV_BUFFERSRC_FLAG_PUSH) >= 0)
                {
                    // Pull the composited frame out of the sink, if available.
                    ret = av_buffersink_get_frame_flags(g_buffersinkContext, filterFrame, AV_BUFFERSINK_FLAG_NO_REQUEST);
                    if (ret >= 0)
                    {
                        // av_packet_free() both unreferences and frees the
                        // packet, so the original's extra av_freep() in the
                        // deleter was redundant and has been dropped.
                        std::shared_ptr<AVPacket> pTmpPkt(static_cast<AVPacket*>(av_malloc(sizeof(AVPacket))), [](AVPacket *p) { av_packet_free(&p); });
                        av_init_packet(pTmpPkt.get());
                        pTmpPkt->data = NULL;
                        pTmpPkt->size = 0;
                        // Encode the composited frame. got_output is set to 1
                        // by libavcodec when the output packet is non-empty;
                        // encoders may delay and reorder frames internally, so
                        // the packet does not necessarily hold the most recent
                        // frame.
                        ret = avcodec_encode_video2(g_outputEncContext, pTmpPkt.get(), filterFrame, &got_output);
                        if (ret >= 0 && got_output)
                        {
                            // (fix) assign to the outer `ret` instead of
                            // shadowing it, so the write status is observable.
                            ret = av_write_frame(g_outputContext, pTmpPkt.get());
                        }
                    }
                }
            }
        }
        // Drop this iteration's references and reset the frames for reuse.
        av_frame_unref(filterFrame);
        av_frame_unref(pInputMediaStreamFrame);
        av_frame_unref(pSrcMediaStreamFrame);
        // Produce only about 10 seconds of output (wall-clock based).
        if ((outLastTime - firstPacketTime) > (10 * 1000 * 1000))
        {
            break;
        }
    }

    // (fix) Drain delayed frames out of the encoder before the trailer is
    // written; the original dropped whatever the encoder still buffered.
    while (true)
    {
        std::shared_ptr<AVPacket> pFlushPkt(static_cast<AVPacket*>(av_malloc(sizeof(AVPacket))), [](AVPacket *p) { av_packet_free(&p); });
        av_init_packet(pFlushPkt.get());
        pFlushPkt->data = NULL;
        pFlushPkt->size = 0;
        // Passing NULL as the frame flushes the encoder's internal queue.
        ret = avcodec_encode_video2(g_outputEncContext, pFlushPkt.get(), NULL, &got_output);
        if (ret < 0 || !got_output)
            break;
        av_write_frame(g_outputContext, pFlushPkt.get());
    }

    // (fix) Free every allocated frame; the original leaked
    // pSrcMediaStreamFrame (and the unused picture input frame).
    av_frame_free(&pSrcMediaStreamFrame);
    av_frame_free(&pInputMediaStreamFrame);
    av_frame_free(&pSrcPictureFrame);
    av_frame_free(&filterFrame);

    if (g_outputs)
        avfilter_inout_free(&g_outputs);
    if (g_inputs)
        avfilter_inout_free(&g_inputs);
    avfilter_graph_free(&g_filter_graph);

    // Emit any packets still buffered by the muxer and write the file trailer.
    av_write_trailer(g_outputContext);
    this_thread::sleep_for(chrono::seconds(2));
    CloseInputContext();
    CloseOutputContext();
    return 0;
}
4. Project source download:
Build and run the project with the Debug / x86 configuration.
The project is configured for the author's own development environment, and all required source files are included in the download.