使用ffmpeg编码aac和opus

在学习OBS源码的过程中,看到音频编码部分,摘抄其中一部分重点代码,供参考研究

直接上代码:

#include <util/base.h>
#include <util/circlebuf.h>
#include <util/darray.h>
#include <obs-module.h>

#include <libavutil/opt.h>
#include <libavformat/avformat.h>

#include "obs-ffmpeg-formats.h"
#include "obs-ffmpeg-compat.h"


/* Per-instance state for an FFmpeg-backed audio encoder (AAC or Opus). */
struct enc_encoder {
	obs_encoder_t    *encoder;      /* owning OBS encoder handle */

	const char       *type;        /* FFmpeg encoder name actually in use */

	AVCodec          *codec;
	AVCodecContext   *context;

	uint8_t          *samples[MAX_AV_PLANES]; /* staging buffers, one per plane */
	AVFrame          *aframe;       /* reused frame handed to the encoder */
	int64_t          total_samples; /* running sample count, drives pts */

	DARRAY(uint8_t)  packet_buffer; /* copy of the last encoded packet */

	size_t           audio_planes;  /* planes per frame for the input format */
	size_t           audio_size;    /* bytes per sample-frame per plane */

	int              frame_size; /* pretty much always 1024 for AAC */
	int              frame_size_bytes;
};

/* Map an OBS speaker layout onto the matching FFmpeg channel-layout mask.
 * Returns 0 for unknown/unmapped layouts. */
static inline uint64_t convert_speaker_layout(enum speaker_layout layout)
{
	switch (layout) {
	case SPEAKERS_MONO:
		return AV_CH_LAYOUT_MONO;
	case SPEAKERS_STEREO:
		return AV_CH_LAYOUT_STEREO;
	case SPEAKERS_2POINT1:
		return AV_CH_LAYOUT_SURROUND;
	case SPEAKERS_4POINT0:
		return AV_CH_LAYOUT_4POINT0;
	case SPEAKERS_4POINT1:
		return AV_CH_LAYOUT_4POINT1;
	case SPEAKERS_5POINT1:
		return AV_CH_LAYOUT_5POINT1_BACK;
	case SPEAKERS_7POINT1:
		return AV_CH_LAYOUT_7POINT1;
	case SPEAKERS_UNKNOWN:
	default:
		/* shouldn't get here */
		return 0;
	}
}

/* Inverse of convert_speaker_layout(): map an FFmpeg channel-layout mask
 * back to the OBS speaker enumeration.  Unrecognized masks yield
 * SPEAKERS_UNKNOWN. */
static inline enum speaker_layout convert_ff_channel_layout(uint64_t  channel_layout)
{
	switch (channel_layout) {
	case AV_CH_LAYOUT_MONO:
		return SPEAKERS_MONO;
	case AV_CH_LAYOUT_STEREO:
		return SPEAKERS_STEREO;
	case AV_CH_LAYOUT_SURROUND:
		return SPEAKERS_2POINT1;
	case AV_CH_LAYOUT_4POINT0:
		return SPEAKERS_4POINT0;
	case AV_CH_LAYOUT_4POINT1:
		return SPEAKERS_4POINT1;
	case AV_CH_LAYOUT_5POINT1_BACK:
		return SPEAKERS_5POINT1;
	case AV_CH_LAYOUT_7POINT1:
		return SPEAKERS_7POINT1;
	default:
		/* shouldn't get here */
		return SPEAKERS_UNKNOWN;
	}
}

/* Display name for the AAC encoder, shown in the OBS UI. */
static const char *aac_getname(void *unused)
{
	const char *display_name = "FFmpegAAC";
	UNUSED_PARAMETER(unused);
	return display_name;
}

/* Display name for the Opus encoder, shown in the OBS UI. */
static const char *opus_getname(void *unused)
{
	const char *display_name = "FFmpegOpus";
	UNUSED_PARAMETER(unused);
	return display_name;
}

/* Tear down an encoder instance: release the sample staging buffer, the
 * codec context, the reusable frame, the packet buffer, and the struct
 * itself.  Safe to call on a partially-constructed instance (enc_create's
 * fail path). */
static void enc_destroy(void *data)
{
	struct enc_encoder *enc = data;

	if (enc->samples[0])
		av_freep(&enc->samples[0]);
	/* FIX: avcodec_close() alone leaked the AVCodecContext allocated by
	 * avcodec_alloc_context3(); avcodec_free_context() closes the codec
	 * and frees the context (and extradata) in one step. */
	if (enc->context)
		avcodec_free_context(&enc->context);
	if (enc->aframe)
		av_frame_free(&enc->aframe);

	da_free(enc->packet_buffer);
	bfree(enc);
}

/* Open the codec and allocate the frame + interleaved sample buffer.
 * Requires enc->context to be fully configured.  Returns false on any
 * failure; enc_destroy() cleans up whatever was allocated. */
static bool initialize_codec(struct enc_encoder *enc)
{
	int ret;

	enc->aframe  = av_frame_alloc();
	if (!enc->aframe) {
		warn("Failed to allocate audio frame");
		return false;
	}

	ret = avcodec_open2(enc->context, enc->codec, NULL);
	if (ret < 0) {
		/* FIX: message hard-coded "AAC" even when opening Opus;
		 * report the actual codec name */
		warn("Failed to open %s codec: %s", enc->codec->name,
				av_err2str(ret));
		return false;
	}
	enc->aframe->format = enc->context->sample_fmt;
	enc->aframe->channels = enc->context->channels;
	enc->aframe->channel_layout = enc->context->channel_layout;
	enc->aframe->sample_rate = enc->context->sample_rate;

	/* some codecs report 0; fall back to the typical AAC frame size */
	enc->frame_size = enc->context->frame_size;
	if (!enc->frame_size)
		enc->frame_size = 1024;

	enc->frame_size_bytes = enc->frame_size * (int)enc->audio_size;

	ret = av_samples_alloc(enc->samples, NULL, enc->context->channels,
			enc->frame_size, enc->context->sample_fmt, 0);
	if (ret < 0) {
		warn("Failed to create audio buffer: %s", av_err2str(ret));
		return false;
	}

	return true;
}

/* Cache plane count and per-sample byte size for the codec's chosen
 * sample format combined with the audio output's speaker layout. */
static void init_sizes(struct enc_encoder *enc, audio_t *audio)
{
	const struct audio_output_info *aoi = audio_output_get_info(audio);
	enum audio_format fmt =
		convert_ffmpeg_sample_format(enc->context->sample_fmt);

	enc->audio_planes = get_audio_planes(fmt, aoi->speakers);
	enc->audio_size   = get_audio_size(fmt, aoi->speakers, 1);
}

#ifndef MIN
/* Classic min macro — note both arguments may be evaluated twice. */
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#endif

/* Common constructor for both AAC and Opus encoder instances.
 *
 * @param settings  OBS settings; reads "bitrate" (kbps).
 * @param encoder   owning OBS encoder.
 * @param type      primary FFmpeg encoder name to look up.
 * @param alt       fallback encoder name (may be NULL).
 * @return new enc_encoder on success, NULL on failure (everything freed).
 */
static void *enc_create(obs_data_t *settings, obs_encoder_t *encoder,
		const char *type, const char *alt)
{
	struct enc_encoder *enc;
	int                bitrate = (int)obs_data_get_int(settings, "bitrate");
	audio_t            *audio   = obs_encoder_audio(encoder);

	avcodec_register_all();

	enc          = bzalloc(sizeof(struct enc_encoder));
	enc->encoder = encoder;
	enc->codec   = avcodec_find_encoder_by_name(type);
	enc->type    = type;

	/* fall back to the alternate encoder (e.g. libfdk_aac) */
	if (!enc->codec && alt) {
		enc->codec = avcodec_find_encoder_by_name(alt);
		enc->type  = alt;
	}

	info("---------------------------------");

	if (!enc->codec) {
		warn("Couldn't find encoder");
		goto fail;
	}

	/* FIX: was `return NULL`, which leaked `enc` */
	if (!bitrate) {
		warn("Invalid bitrate specified");
		goto fail;
	}

	enc->context = avcodec_alloc_context3(enc->codec);
	if (!enc->context) {
		warn("Failed to create codec context");
		goto fail;
	}

	enc->context->bit_rate    = bitrate * 1000;
	const struct audio_output_info *aoi;
	aoi = audio_output_get_info(audio);
	enc->context->channels    = (int)audio_output_get_channels(audio);
	enc->context->channel_layout = convert_speaker_layout(aoi->speakers);
	enc->context->sample_rate = audio_output_get_sample_rate(audio);
	enc->context->sample_fmt  = enc->codec->sample_fmts ?
		enc->codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;

	/* check to make sure sample rate is supported; if not, pick the
	 * supported rate closest to the requested one */
	if (enc->codec->supported_samplerates) {
		const int *rate = enc->codec->supported_samplerates;
		int cur_rate = enc->context->sample_rate;
		int closest = 0;

		while (*rate) {
			int dist = abs(cur_rate - *rate);
			int closest_dist = abs(cur_rate - closest);

			if (dist < closest_dist)
				closest = *rate;
			rate++;
		}

		if (closest)
			enc->context->sample_rate = closest;
	}

	if (strcmp(enc->codec->name, "aac") == 0) {
		av_opt_set(enc->context->priv_data, "aac_coder", "fast", 0);
	}

	info("bitrate: %" PRId64 ", channels: %d, channel_layout: %x",
			(int64_t)enc->context->bit_rate / 1000,
			(int)enc->context->channels,
			(unsigned int)enc->context->channel_layout);

	init_sizes(enc, audio);

	/* enable experimental FFmpeg encoder if the only one available */
	enc->context->strict_std_compliance = -2;

	/* FIX: CODEC_FLAG_GLOBAL_H was a truncated CODEC_FLAG_GLOBAL_HEADER;
	 * also OR the flag in instead of overwriting any existing flags */
	enc->context->flags |= CODEC_FLAG_GLOBAL_HEADER;

	if (initialize_codec(enc))
		return enc;

fail:
	enc_destroy(enc);
	return NULL;
}

/* AAC factory: prefer FFmpeg's native "aac", fall back to libfdk_aac. */
static void *aac_create(obs_data_t *settings, obs_encoder_t *encoder)
{
	return enc_create(settings, encoder, "aac", "libfdk_aac");
}

/* Opus factory: prefer libopus, fall back to the built-in "opus". */
static void *opus_create(obs_data_t *settings, obs_encoder_t *encoder)
{
	return enc_create(settings, encoder, "libopus", "opus");
}

/* Encode one frame's worth of samples already staged in enc->samples and,
 * if the codec produced output, fill *packet from an internal buffer.
 *
 * @param received_packet  set true only when *packet holds valid data;
 *                         the codec may buffer and emit nothing this call.
 * @return false on encode error, true otherwise (even with no packet).
 *
 * NOTE(review): packet->data points at enc->packet_buffer, which is
 * overwritten on the next call — callers must consume it before then.
 */
static bool do_encode(struct enc_encoder *enc,
		struct encoder_packet *packet, bool *received_packet)
{
	AVRational time_base = {1, enc->context->sample_rate};
	AVPacket   avpacket  = {0};
	int        got_packet;
	int        ret;

	/* pts derives from the running sample counter, rescaled from
	 * sample-rate ticks into the codec's own time base */
	enc->aframe->nb_samples = enc->frame_size;
	enc->aframe->pts = av_rescale_q(enc->total_samples,
			(AVRational){1, enc->context->sample_rate},
			enc->context->time_base);

	/* point the frame's plane pointers into the staging buffer; the
	 * buffer was allocated contiguously so samples[0] plus the total
	 * byte size covers all planes */
	ret = avcodec_fill_audio_frame(enc->aframe, enc->context->channels,
			enc->context->sample_fmt, enc->samples[0],
			enc->frame_size_bytes * enc->context->channels, 1);
	if (ret < 0) {
		warn("avcodec_fill_audio_frame failed: %s", av_err2str(ret));
		return false;
	}

	enc->total_samples += enc->frame_size;

#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 40, 101)
	/* modern send/receive API: EAGAIN/EOF just mean "no packet yet",
	 * not an error */
	ret = avcodec_send_frame(enc->context, enc->aframe);
	if (ret == 0)
		ret = avcodec_receive_packet(enc->context, &avpacket);

	got_packet = (ret == 0);

	if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
		ret = 0;
#else
	/* legacy one-shot encode API */
	ret = avcodec_encode_audio2(enc->context, &avpacket, enc->aframe,
			&got_packet);
#endif
	if (ret < 0) {
		warn("avcodec_encode_audio2 failed: %s", av_err2str(ret));
		return false;
	}

	*received_packet = !!got_packet;
	if (!got_packet)
		return true;

	/* copy packet payload into our own buffer so we can release the
	 * AVPacket before returning */
	da_resize(enc->packet_buffer, 0);
	da_push_back_array(enc->packet_buffer, avpacket.data, avpacket.size);

	packet->pts  = rescale_ts(avpacket.pts, enc->context, time_base);
	packet->dts  = rescale_ts(avpacket.dts, enc->context, time_base);
	packet->data = enc->packet_buffer.array;
	packet->size = avpacket.size;
	packet->type = OBS_ENCODER_AUDIO;
	packet->timebase_num = 1;
	packet->timebase_den = (int32_t)enc->context->sample_rate;
	av_free_packet(&avpacket);
	return true;
}

/* OBS encode callback: stage the incoming planes into the internal
 * sample buffers, then run the shared encode path. */
static bool enc_encode(void *data, struct encoder_frame *frame,
		struct encoder_packet *packet, bool *received_packet)
{
	struct enc_encoder *enc = data;
	size_t plane;

	for (plane = 0; plane < enc->audio_planes; plane++) {
		memcpy(enc->samples[plane], frame->data[plane],
				enc->frame_size_bytes);
	}

	return do_encode(enc, packet, received_packet);
}

/* Register default settings: 128 kbps bitrate. */
static void enc_defaults(obs_data_t *settings)
{
	obs_data_set_default_int(settings, "bitrate", 128);
}

/* Expose the codec's global extradata (e.g. AudioSpecificConfig for AAC).
 * The pointer is owned by the codec context; callers must not free it. */
static bool enc_extra_data(void *data, uint8_t **extra_data, size_t *size)
{
	struct enc_encoder *enc = data;

	*size       = enc->context->extradata_size;
	*extra_data = enc->context->extradata;
	return true;
}

static void enc_audio_info(void *data, struct audio_convert_info *info)
{
	struct enc_encoder *enc = data;
	info->format = convert_ffmpeg_sample_format(enc->context->sample_fmt);
	info->samples_per_sec = (uint32_t)enc->context->sample_rate;
	info->speakers = convert_ff_channel_layout(enc->context->channel_layout);
}

/* Samples per encode call that this encoder expects. */
static size_t enc_frame_size(void *data)
{
	struct enc_encoder *enc = data;
	return (size_t)enc->frame_size;
}

/* OBS registration table for the FFmpeg AAC encoder. */
struct obs_encoder_info aac_encoder_info = {
	.id             = "ffmpeg_aac",
	.type           = OBS_ENCODER_AUDIO,
	.codec          = "AAC",
	.get_name       = aac_getname,
	.create         = aac_create,
	.destroy        = enc_destroy,
	.encode         = enc_encode,
	.get_frame_size = enc_frame_size,
	.get_defaults   = enc_defaults,
	.get_extra_data = enc_extra_data,
	.get_audio_info = enc_audio_info
};

/* OBS registration table for the FFmpeg Opus encoder. */
struct obs_encoder_info opus_encoder_info = {
	.id             = "ffmpeg_opus",
	.type           = OBS_ENCODER_AUDIO,
	.codec          = "opus",
	.get_name       = opus_getname,
	.create         = opus_create,
	.destroy        = enc_destroy,
	.encode         = enc_encode,
	.get_frame_size = enc_frame_size,
	.get_defaults   = enc_defaults,
	.get_extra_data = enc_extra_data,
	.get_audio_info = enc_audio_info
};

另外还有一个 audio-io.h 头文件,内容如下:

#pragma once

#include "media-io-defs.h"
#include "../util/c99defs.h"
#include "../util/util_uint128.h"

#ifdef __cplusplus
extern "C" {
#endif

#define MAX_AUDIO_MIXES     6    /* independent output mixes */
#define MAX_AUDIO_CHANNELS  8    /* max channels per mix */
#define AUDIO_OUTPUT_FRAMES 1024 /* frames delivered per output tick */
#define AUDIO_SAMPLE_RATE   44100

/* worst-case bytes for one tick of float audio across all mixes */
#define TOTAL_AUDIO_SIZE \
	(MAX_AUDIO_MIXES * MAX_AUDIO_CHANNELS * \
	 AUDIO_OUTPUT_FRAMES * sizeof(float))

/*
 * Base audio output component.  Use this to create an audio output track
 * for the media.
 */

struct audio_output;                 /* opaque; defined in the .c file */
typedef struct audio_output audio_t; /* handle type used by the public API */

/* Sample formats; the *_PLANAR variants store each channel in its own
 * plane, the others interleave channels. */
enum audio_format {
	AUDIO_FORMAT_UNKNOWN,

	AUDIO_FORMAT_U8BIT,   /* unsigned 8-bit, interleaved */
	AUDIO_FORMAT_16BIT,   /* signed 16-bit, interleaved */
	AUDIO_FORMAT_32BIT,   /* signed 32-bit, interleaved */
	AUDIO_FORMAT_FLOAT,   /* 32-bit float, interleaved */

	AUDIO_FORMAT_U8BIT_PLANAR,
	AUDIO_FORMAT_16BIT_PLANAR,
	AUDIO_FORMAT_32BIT_PLANAR,
	AUDIO_FORMAT_FLOAT_PLANAR,
};

/**
 * The speaker layout describes where the speakers are located in the room.
 * For OBS it dictates:
 *  *  how many channels are available and
 *  *  which channels are used for which speakers.
 *
 * Standard channel layouts where retrieved from ffmpeg documentation at:
 *     https://trac.ffmpeg.org/wiki/AudioChannelManipulation
 */
enum speaker_layout {
	SPEAKERS_UNKNOWN,   /**< Unknown setting, fallback is stereo. */
	SPEAKERS_MONO,      /**< Channels: MONO */
	SPEAKERS_STEREO,    /**< Channels: FL, FR */
	SPEAKERS_2POINT1,   /**< Channels: FL, FR, LFE */
	SPEAKERS_4POINT0,   /**< Channels: FL, FR, FC, RC */
	SPEAKERS_4POINT1,   /**< Channels: FL, FR, FC, LFE, RC */
	SPEAKERS_5POINT1,   /**< Channels: FL, FR, FC, LFE, RL, RR */
	/* value 7 is skipped — presumably reserved for a removed layout
	 * (6.1?) to keep numeric values stable; TODO confirm */
	SPEAKERS_7POINT1=8, /**< Channels: FL, FR, FC, LFE, RL, RR, SL, SR */
};

/* One chunk of raw audio handed to output callbacks. */
struct audio_data {
	uint8_t             *data[MAX_AV_PLANES]; /* one pointer per plane */
	uint32_t            frames;               /* sample-frames in data */
	uint64_t            timestamp;            /* in nanoseconds */
};

/* Fixed properties of an audio output track. */
struct audio_output_info {
	const char          *name;

	uint32_t            samples_per_sec;
	enum audio_format   format;
	enum speaker_layout speakers;
};

/* Requested conversion target for a consumer connecting to an output. */
struct audio_convert_info {
	uint32_t            samples_per_sec;
	enum audio_format   format;
	enum speaker_layout speakers;
};

/* Number of channels implied by a speaker layout (0 when unknown). */
static inline uint32_t get_audio_channels(enum speaker_layout speakers)
{
	switch (speakers) {
	case SPEAKERS_UNKNOWN:
		return 0;
	case SPEAKERS_MONO:
		return 1;
	case SPEAKERS_STEREO:
		return 2;
	case SPEAKERS_2POINT1:
		return 3;
	case SPEAKERS_4POINT0:
		return 4;
	case SPEAKERS_4POINT1:
		return 5;
	case SPEAKERS_5POINT1:
		return 6;
	case SPEAKERS_7POINT1:
		return 8;
	}

	return 0;
}

/* Bytes occupied by a single sample of one channel (0 when unknown). */
static inline size_t get_audio_bytes_per_channel(enum audio_format format)
{
	switch (format) {
	case AUDIO_FORMAT_UNKNOWN:
		return 0;

	case AUDIO_FORMAT_U8BIT:
	case AUDIO_FORMAT_U8BIT_PLANAR:
		return 1;

	case AUDIO_FORMAT_16BIT:
	case AUDIO_FORMAT_16BIT_PLANAR:
		return 2;

	case AUDIO_FORMAT_32BIT:
	case AUDIO_FORMAT_32BIT_PLANAR:
	case AUDIO_FORMAT_FLOAT:
	case AUDIO_FORMAT_FLOAT_PLANAR:
		return 4;
	}

	return 0;
}

/* True when the format stores each channel in a separate plane. */
static inline bool is_audio_planar(enum audio_format format)
{
	switch (format) {
	case AUDIO_FORMAT_U8BIT_PLANAR:
	case AUDIO_FORMAT_16BIT_PLANAR:
	case AUDIO_FORMAT_32BIT_PLANAR:
	case AUDIO_FORMAT_FLOAT_PLANAR:
		return true;

	case AUDIO_FORMAT_UNKNOWN:
	case AUDIO_FORMAT_U8BIT:
	case AUDIO_FORMAT_16BIT:
	case AUDIO_FORMAT_32BIT:
	case AUDIO_FORMAT_FLOAT:
		return false;
	}

	return false;
}

/* Plane count: one plane per channel for planar formats, otherwise a
 * single interleaved plane. */
static inline size_t get_audio_planes(enum audio_format format,
		enum speaker_layout speakers)
{
	if (is_audio_planar(format))
		return get_audio_channels(speakers);
	return 1;
}

/* Bytes per plane for `frames` sample-frames: planar planes hold one
 * channel each; interleaved planes hold all channels. */
static inline size_t get_audio_size(enum audio_format format,
		enum speaker_layout speakers, uint32_t frames)
{
	size_t channels_per_plane = is_audio_planar(format)
			? 1 : get_audio_channels(speakers);

	return channels_per_plane *
	       get_audio_bytes_per_channel(format) *
	       frames;
}

/* Convert a frame count to nanoseconds: frames * 1e9 / sample_rate,
 * computed in 128-bit to avoid overflow. */
static inline uint64_t audio_frames_to_ns(size_t sample_rate,
		uint64_t frames)
{
	util_uint128_t v = util_mul64_64(frames, 1000000000ULL);
	v = util_div128_32(v, (uint32_t)sample_rate);
	return v.low;
}

/* Convert nanoseconds to a frame count: ns * sample_rate / 1e9,
 * computed in 128-bit to avoid overflow.
 * FIX: the parameter was misleadingly named `frames` although it carries
 * nanoseconds; renamed to `ns` (C callers bind positionally, so this is
 * interface-compatible). */
static inline uint64_t ns_to_audio_frames(size_t sample_rate,
		uint64_t ns)
{
	util_uint128_t val;
	val = util_mul64_64(ns, sample_rate);
	val = util_div128_32(val, 1000000000);
	return val.low;
}

/* audio_output_open() return codes */
#define AUDIO_OUTPUT_SUCCESS       0
#define AUDIO_OUTPUT_INVALIDPARAM -1
#define AUDIO_OUTPUT_FAIL         -2

EXPORT int audio_output_open(audio_t **audio, struct audio_output_info *info);
EXPORT void audio_output_close(audio_t *audio);

/* Called for each chunk of output audio delivered to a consumer. */
typedef void (*audio_output_callback_t)(void *param, struct audio_data *data);

/* NOTE(review): the first parameter is named `video` but its type is
 * audio_t — presumably a copy/paste slip in the original; verify before
 * renaming in the declaration/definition pair. */
EXPORT bool audio_output_connect(audio_t *video,
		const struct audio_convert_info *conversion,
		audio_output_callback_t callback, void *param);
EXPORT void audio_output_disconnect(audio_t *video,
		audio_output_callback_t callback, void *param);

EXPORT bool audio_output_active(const audio_t *audio);

EXPORT size_t audio_output_get_block_size(const audio_t *audio);
EXPORT size_t audio_output_get_planes(const audio_t *audio);
EXPORT size_t audio_output_get_channels(const audio_t *audio);
EXPORT uint32_t audio_output_get_sample_rate(const audio_t *audio);
EXPORT const struct audio_output_info *audio_output_get_info(
		const audio_t *audio);
EXPORT int audio_output_receive_frame(struct audio_output *audio, uint8_t *data, size_t len);
EXPORT int audio_output_get_receive_frame(struct audio_output *audio, struct audio_data *out_frame);

#ifdef __cplusplus
}
#endif

猜你喜欢

转载自blog.csdn.net/tong5956/article/details/84876178