Simulating an H.264 Input Stream and Writing a Container Format File (API Version)

Copyright notice: this is an original article by the author and may not be reposted without permission. https://blog.csdn.net/u013699869/article/details/50071813

A fixed-size chunk of data is read from an H.264 file on each call to simulate an incoming H.264 stream; the stream is finally written out as a container format file.
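The splitting logic in format_data_import() relies on the Annex-B layout of a raw H.264 file: the stream is a sequence of NAL units, each preceded by a start code (typically 00 00 00 01), and the byte right after the start code identifies the NAL type. Assuming the common NAL header byte values that the code below also assumes, a stream begins roughly like this:

00 00 00 01 67 ...   SPS
00 00 00 01 68 ...   PPS
00 00 00 01 65 ...   IDR slice (keyframe)
00 00 00 01 41 ...   non-IDR slice
...

format_data_import() scans for these start codes to cut the incoming bytes into packets for the muxer.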

//H264ToContainer_Win32.h
extern "C"
{
	//@param r_frame_rate frame rate of the input H.264 stream
	//@param buffer_size_max maximum size of the data passed in per call
	__declspec(dllexport) AVFormatContext *format_initialise(const char *out_filename, float r_frame_rate, int buffer_size_max);

	//@param data the data passed in on this call
	//@param buffer_size size of the data passed in this time; it may differ from call to call, but must not exceed the buffer_size_max given to format_initialise()
	__declspec(dllexport) AVFormatContext *format_data_import(AVFormatContext *ofmt_ctx, uint8_t *data, int buffer_size);

	__declspec(dllexport) int format_close(AVFormatContext *ofmt_ctx);
}

// H264ToContainer_Win32.cpp : Defines the exported functions for the DLL application.
//

#include "stdafx.h"

#define __STDC_CONSTANT_MACROS

extern "C"
{
#include "libavformat/avformat.h"
}
#include "H264ToContainer_Win32.h"

static uint8_t *temp;

AVFormatContext *format_initialise(const char *out_filename, float r_frame_rate, int buffer_size_max)
{
	AVOutputFormat *ofmt = NULL;
	AVFormatContext *ofmt_ctx = NULL;
	//Declared up front so that the error-handling gotos below do not jump over their initialisation (C++ rejects that).
	AVCodec *codec = NULL;
	AVStream *out_stream = NULL;
	int ret;

	av_register_all();

	avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
	if (!ofmt_ctx) {
		printf("Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt = ofmt_ctx->oformat;
	codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!codec) {
		printf("Could not find encoder for '%s'\n",
			avcodec_get_name(AV_CODEC_ID_H264));
		goto end;
	}
	out_stream = avformat_new_stream(ofmt_ctx, codec);
	if (!out_stream) {
		printf("Failed allocating output stream\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	out_stream->codec->codec_tag = 0;
	/* Some formats want stream headers to be separate. */
	if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
		out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

	printf("==========Output Information==========\n");
	av_dump_format(ofmt_ctx, 0, out_filename, 1);
	printf("======================================\n");
	//Open output file
	if (!(ofmt->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
			printf("Could not open output file '%s'", out_filename);
			goto end;
		}
	}

	//The values themselves do not matter, but width and height must be set on the AVCodecContext;
	//otherwise avformat_write_header() fails with "dimensions not set".
	ofmt_ctx->streams[0]->codec->width = 1;
	ofmt_ctx->streams[0]->codec->height = 1;

	//Store the frame rate as a rational, e.g. 29.97 -> 29970/1000; round so float imprecision does not truncate the numerator.
	out_stream->r_frame_rate.num = (int)(r_frame_rate * 1000 + 0.5f);
	out_stream->r_frame_rate.den = 1000;

	//Write file header
	if (avformat_write_header(ofmt_ctx, NULL) < 0) {
		printf("Error occurred when opening output file\n");
		goto end;
	}

	//buffer_size_max is the maximum amount of data passed in per call.
	//The allocation for temp must also cover the incomplete frame carried over from the previous call
	//(an IDR frame can be large), so 100000 bytes are reserved for that.
	if (buffer_size_max < 60000)
	{
		//temp is a scratch buffer holding the leftover incomplete frame plus the data passed in this time.
		//5 * 32768 = 163840 bytes, which covers 100000 + buffer_size_max for any buffer_size_max below 60000.
		temp = (uint8_t *)malloc(5 * 32768);
	}
	else
	{
		//temp is a scratch buffer holding the leftover incomplete frame plus the data passed in this time.
		temp = (uint8_t *)malloc(100000 + buffer_size_max);
	}

	return ofmt_ctx;

end:
	/* close output */
	if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	return 0;
}

AVFormatContext *format_data_import(AVFormatContext *ofmt_ctx, uint8_t *data, int buffer_size)
{
	//Size of the incomplete frame left over from the previous call.
	static int residue_len = 0;
	static int frame_index = 0;
	memcpy(temp + residue_len, data, buffer_size);
	int frame_header = 0;
	//Start the search at j == 1: position 0 is necessarily a frame header already; we are looking for the next one.
	for (int j = 1; j <= residue_len + buffer_size - 5; j++)
	{
		if (temp[j] == 0x00 && temp[j + 1] == 0x00 && temp[j + 2] == 0x00 && temp[j + 3] == 0x01 && temp[j + 4] != 0x68)
		{
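			//temp[j..j+3] == 00 00 00 01 is an Annex-B start code; the byte after it is the NAL header.
			//A start code immediately preceding a PPS NAL (header byte 0x68) is deliberately not treated
			//as a frame boundary, so a PPS is never split off into a packet of its own. Everything from
			//frame_header up to (but not including) j is written out as one packet.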
			AVPacket *pkt = av_packet_alloc();
			//av_new_packet() attaches a reference-counted buffer of the right size, so av_packet_free()
			//below actually releases it (a plain malloc()'d data pointer would leak).
			av_new_packet(pkt, j - frame_header);
			memcpy(pkt->data, temp + frame_header, j - frame_header);
			//Treat a packet that starts with an SPS as a keyframe. If keyframes are not flagged correctly, random seeking and fast forward/rewind do not work properly.
			if (temp[frame_header + 4] == 0x67)
			{
				pkt->flags |= AV_PKT_FLAG_KEY;
			}
			AVRational time_base = ofmt_ctx->streams[0]->time_base;
			//Duration between 2 frames (μs)
			int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(ofmt_ctx->streams[0]->r_frame_rate);
			pkt->pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base)*AV_TIME_BASE);
			pkt->duration = (double)calc_duration / (double)(av_q2d(time_base)*AV_TIME_BASE);
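			//Worked example with the numbers used in the test program below (an assumption, not a requirement):
			//r_frame_rate = 29.97 gives calc_duration = 1000000 / 29.97 ≈ 33366 microseconds; for Matroska the
			//muxer sets the stream time_base to 1/1000 in avformat_write_header(), so pkt->duration ≈ 33 and
			//pts advances by roughly 33 per frame.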
			frame_index++;
			printf("Write 1 Packet. size:%5d\tpts:%lld\n", pkt->size, pkt->pts);
			if (av_interleaved_write_frame(ofmt_ctx, pkt) < 0)
			{
				printf("Error muxing packet\n");
			}
			av_packet_free(&pkt);
			frame_header = j;
		}
		else
		{
			continue;
		}
	}
	residue_len = residue_len + buffer_size - frame_header;
	//Move the remaining tail to the start of temp. Source and destination may overlap, so use memmove() (memcpy() with overlapping regions is undefined behaviour).
	memmove(temp, temp + frame_header, residue_len);

	return ofmt_ctx;

}

int format_close(AVFormatContext *ofmt_ctx)
{
	free(temp);

	//Write file trailer
	av_write_trailer(ofmt_ctx);

	/* close output */
	if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);

	return 0;
}
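The console test program below exercises the DLL: it opens a raw H.264 file, feeds it to the muxer chunk by chunk, and closes the output when the file is exhausted.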

// test.cpp : Defines the entry point for the console application.
//

#include "stdafx.h"
#define __STDC_CONSTANT_MACROS

extern "C"
{
#include "libavformat/avformat.h"
}
#include "H264ToContainer_Win32.h"
#define BUFFER_SIZE 32768


int _tmain(int argc, _TCHAR* argv[])
{
	const char *in_filename_v = "media files/JINUSEAN_17s.h264"; //Input file URL
	const char *out_filename = "media files/JINUSEAN_17s.mkv"; //Output file URL
	float r_frame_rate = 29.97f;
	AVFormatContext *ofmt_ctx = format_initialise(out_filename, r_frame_rate, BUFFER_SIZE);
	if (!ofmt_ctx)
		return -1;
	FILE *fp_open = fopen(in_filename_v, "rb");
	if (!fp_open) {
		printf("Could not open input file '%s'\n", in_filename_v);
		return -1;
	}
	uint8_t *buf = (uint8_t *)malloc(BUFFER_SIZE);
	int bytes_read;
	//Feed the file to the muxer in chunks; the last chunk may be shorter than BUFFER_SIZE.
	while ((bytes_read = (int)fread(buf, 1, BUFFER_SIZE, fp_open)) > 0)
	{
		ofmt_ctx = format_data_import(ofmt_ctx, buf, bytes_read);
	}
	free(buf);
	fclose(fp_open);
	format_close(ofmt_ctx);

	return 0;
}

