The Simplest FFmpeg-Based Container Format Converter (C++/Qt Edition)

This post is a continuation of my previous one, so I recommend reading that post first:

https://blog.csdn.net/liyuanbhu/article/details/121715005

The reason for writing this post is that FFmpeg's API is not exactly friendly. Reorganizing it in an object-oriented way makes the code much easier to read. The rewritten code comes first; compare it with the original version and judge for yourself whether it is much cleaner.

    QlyAVFormatContext inFile, outFile;
    inFile.openFile(QString("D:\\AV36_1.avi"));
    inFile.dumpFormat();
    QSet<AVMediaType> type;
    type << AVMEDIA_TYPE_VIDEO << AVMEDIA_TYPE_AUDIO;
    QVector<QlyAVStream> inStreams = inFile.findStreams(type);

    outFile.createFile(QString(), QString("D:\\AV36_1-qt.mkv"));
    outFile.setStreams(inStreams);
    outFile.writeHeader();

    QlyAVPacket pkt;
    while(inFile.readFrame(pkt, type))
    {
        AVRational in_tb = inFile.rawStream(pkt.streamIndex())->time_base;
        outFile.writeFrame(pkt, in_tb, true);
        pkt.unref();
    }
    outFile.writeTrailer();

This code hardly needs explanation: QlyAVFormatContext wraps AVFormatContext, QlyAVStream wraps AVStream, and QlyAVPacket wraps AVPacket.

inFile.openFile(QString("D:\\AV36_1.avi"));

This single line covers what used to take several lines of code:

int QlyAVFormatContext::openFile(QString url)
{
    m_url = url;
    isOutput = false;
    if(pFormatCtx) avformat_free_context(pFormatCtx);
    pFormatCtx = avformat_alloc_context();
    errorcode = avformat_open_input(&pFormatCtx, url.toLocal8Bit().constData(), nullptr, nullptr);
    if (errorcode < 0) return errorcode;
    errorcode = avformat_find_stream_info(pFormatCtx, nullptr);
    return errorcode;
}

And because C++ has destructors, releasing the resources is done automatically, with no effort on our part:

QlyAVFormatContext::~QlyAVFormatContext()
{
    if(isOutput && pFormatCtx)
    {
        closeOutputFile();
    }
    else if(pFormatCtx)
    {
        avformat_close_input(&pFormatCtx);
    }
    avformat_free_context(pFormatCtx);
}

Adding streams to the output file also becomes very simple:

QSet<AVMediaType> type;
type << AVMEDIA_TYPE_VIDEO << AVMEDIA_TYPE_AUDIO;
QVector<QlyAVStream> inStreams = inFile.findStreams(type);
outFile.setStreams(inStreams);

Only the audio and video streams are picked up here; all other streams are ignored. Writing the file header and trailer still has to be spelled out explicitly, so there is room for improvement: the next version will get rid of these two statements as well (one possible direction is sketched right after the snippet below).

outFile.writeHeader();
// the packet-writing loop goes here
outFile.writeTrailer();
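
As a teaser for that improvement, here is one possible direction (just a sketch, not part of the code in this post): a small RAII helper whose constructor writes the header and whose destructor writes the trailer, so the calling code can drop both statements.

// Hypothetical helper, not part of the classes below: writes the header on
// construction and the trailer on destruction.
class QlyAVMuxGuard
{
public:
    explicit QlyAVMuxGuard(QlyAVFormatContext &out) : m_out(out) { m_out.writeHeader(); }
    ~QlyAVMuxGuard() { m_out.writeTrailer(); }
private:
    QlyAVFormatContext &m_out;
};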

Finally, the key part: reading packets from the source file and writing them to the output file.

QlyAVPacket pkt;
while(inFile.readFrame(pkt, type))
{
    AVRational in_tb = inFile.rawStream(pkt.streamIndex())->time_base;
    outFile.writeFrame(pkt, in_tb, true);
    pkt.unref();
}

The one thing I am not happy with here is that pkt.unref() is still required. It should be possible for readFrame to unref the packet automatically first.
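
For example, readFrame() could simply unref the packet itself before calling av_read_frame(). A minimal sketch of that variant (the class listed below does not do this yet):

bool QlyAVFormatContext::readFrame(QlyAVPacket &pack, QSet<AVMediaType> type)
{
    if (!pFormatCtx) { errorcode = 0; return false; }
    AVPacket *pPacket = pack.ptr();
    av_packet_unref(pPacket);                      // drop whatever the caller left in the packet
    while (true)
    {
        errorcode = av_read_frame(pFormatCtx, pPacket);
        if (errorcode < 0) return false;
        AVCodecParameters *par = pFormatCtx->streams[pPacket->stream_index]->codecpar;
        if (type.contains(par->codec_type)) return true;
        av_packet_unref(pPacket);                  // not a wanted stream, keep reading
    }
}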

Below is the complete code for each class. First, QlyAVPacket:

#ifndef QLYAVPACKET_H
#define QLYAVPACKET_H

extern "C" {
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
}

class QlyAVPacket
{
public:
    QlyAVPacket();
    ~QlyAVPacket();
    unsigned int streamIndex() {return m_packet.stream_index;}
    void setStreamIndex(unsigned int index);
    void changeTimeBase(AVRational bq, AVRational cq, enum AVRounding rnd);
    void unref();
    AVPacket *ptr() {return &m_packet;}
    AVPacket m_packet;
    AVRational m_timeBase;
};

#endif // QLYAVPACKET_H

#include "QlyAVPacket.h"

QlyAVPacket::QlyAVPacket()
{

}

QlyAVPacket::~QlyAVPacket()
{
    av_packet_unref(&m_packet);
}

void QlyAVPacket::unref()
{
    av_packet_unref(&m_packet);
}

void QlyAVPacket::changeTimeBase(AVRational bq, AVRational cq, enum AVRounding rnd)
{
    m_packet.pts = av_rescale_q_rnd(m_packet.pts, bq, cq, rnd);
    m_packet.dts = av_rescale_q_rnd(m_packet.dts, bq, cq, rnd);
    m_packet.duration = av_rescale_q(m_packet.duration, bq, cq);
    m_packet.pos = -1;
}

void QlyAVPacket::setStreamIndex(unsigned int index)
{
    m_packet.stream_index = index;
}
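
The class also exposes changeTimeBase(), which the main loop above never calls (writeFrame() rescales internally). A small usage sketch, with made-up time bases for illustration:

#include "QlyAVPacket.h"

// Sketch: rescale a packet's timestamps from an input time base (1/25, e.g. a
// 25 fps AVI video stream) to an output time base (1/1000, the Matroska default).
static void rescaleForMkv(QlyAVPacket &pkt)
{
    const AVRational in_tb  = {1, 25};
    const AVRational out_tb = {1, 1000};
    pkt.changeTimeBase(in_tb, out_tb,
                       (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
}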

Next comes QlyAVStream:

#ifndef QLYAVSTREAM_H
#define QLYAVSTREAM_H
#include <QtGlobal>

extern "C" {
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
}

class QlyAVStream
{
    friend class QlyAVFormatContext;
public:
    QlyAVStream() = default;
    QlyAVStream(AVStream *stream, int inStreamIndex = -1, int outStreamIndex = -1);
    QlyAVStream(const QlyAVStream &source);
    QlyAVStream & operator= (const QlyAVStream & source);
    AVCodecParameters * codecpar() {return m_stream->codecpar;}

    AVStream *m_stream = nullptr;
    int m_inStreamIndex = -1;   // index of this stream in the input AVFormatContext
    int m_outStreamIndex = -1;
};

Q_DECLARE_TYPEINFO(QlyAVStream, Q_MOVABLE_TYPE);


#endif // QLYAVSTREAM_H

#include "QlyAVStream.h"

QlyAVStream::QlyAVStream(AVStream *stream, int inStreamIndex, int outStreamIndex)
{
    m_stream = stream;
    m_inStreamIndex = inStreamIndex;
    m_outStreamIndex = outStreamIndex;
}

QlyAVStream::QlyAVStream(const QlyAVStream &source)
{
    m_stream = source.m_stream;
    m_inStreamIndex = source.m_inStreamIndex;
    m_outStreamIndex = source.m_outStreamIndex;
}

QlyAVStream & QlyAVStream::operator= (const QlyAVStream & source)
{
    m_stream = source.m_stream;
    m_inStreamIndex = source.m_inStreamIndex;
    m_outStreamIndex = source.m_outStreamIndex;
    return (*this);
}
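
QlyAVStream is little more than a value type that remembers which AVStream it refers to and its index on the input side. A short sketch of how it might be inspected (it relies on the QDebug overloads declared in QlyAVFormatContext.h, shown below):

#include <QVector>
#include "QlyAVFormatContext.h"   // for QlyAVStream and the QDebug overloads

// Sketch: print the media type and time base of each stream found by findStreams().
static void dumpStreams(const QVector<QlyAVStream> &streams)
{
    for (QlyAVStream st : streams)
    {
        qDebug() << "stream" << st.m_inStreamIndex
                 << st.codecpar()->codec_type
                 << "time_base =" << st.m_stream->time_base;
    }
}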

And finally the most important part, QlyAVFormatContext:

#ifndef QLYAVFORMAT_H
#define QLYAVFORMAT_H
#include <QDebug>
#include <QString>
#include <QMap>
#include <QSet>
#include <QVector>
#include <QList>

extern "C" {
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
}

#include "QlyAVPacket.h"
#include "QlyAVStream.h"



class QlyAVStream;
class QlyAVFormatContext
{
public:
    QlyAVFormatContext();
    ~QlyAVFormatContext();
    static QString license();
    static QString version() ;
    static QString configuration();

    void dumpFormat();

    QVector<QlyAVStream> allStreams();
    QVector<QlyAVStream> findStreams(QSet<AVMediaType> mediaType);
    void setStreams(QVector<QlyAVStream> streams);

    AVStream * rawStream(int i);
    int outStreamIndex(QlyAVPacket &pack);
    bool readFrame(QlyAVPacket &pack, int streamIndex);
    bool readFrame(QlyAVPacket &pack, QSet<AVMediaType> type);
    bool writeFrame(QlyAVPacket &pack, AVRational timeBase, bool useStreamMap);
    int streamCount() const;
    QMap<QString, QString> metadata();

    int openFile(QString url);
    int createFile(QString formatName, QString fileName);
    void closeInputFile();
    int closeOutputFile();
    int writeHeader();
    int writeTrailer();

    QString errorString() const;

private:
    AVFormatContext *pFormatCtx;
    QMap<unsigned int, unsigned int> streamMap; // maps input stream index -> output stream index
    QString m_url;
    int errorcode;
    int isOutput;
    char errstr[512];
    AVRounding m_rounding;
};

QDebug operator<<(QDebug dbg, enum AVMediaType codec_type);
QDebug operator<<(QDebug dbg, AVRational r);

#endif // QLYAVFORMAT_H

#include "QlyAVFormatContext.h"
#include "QlyAVStream.h"

void msg(const char * str, int ret)
{
    static char err[512];
    if(ret < 0)
    {
        av_strerror(ret, err, 512);
        qWarning() << str << " error:" << err;
        exit(ret);
    }
    else
    {
        qDebug() << str << " : success";
    }
}

QDebug operator<<(QDebug dbg, enum AVMediaType codec_type)
{
    switch (codec_type) {
    case AVMEDIA_TYPE_UNKNOWN:
        dbg.nospace() << "AVMEDIA_TYPE_UNKNOWN";
        break;
    case AVMEDIA_TYPE_VIDEO:
        dbg.nospace() << "AVMEDIA_TYPE_VIDEO";
        break;
    case AVMEDIA_TYPE_AUDIO:
        dbg.nospace() << "AVMEDIA_TYPE_AUDIO";
        break;
    case AVMEDIA_TYPE_DATA:
        dbg.nospace() << "AVMEDIA_TYPE_DATA";
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        dbg.nospace() << "AVMEDIA_TYPE_SUBTITLE";
        break;
    case AVMEDIA_TYPE_ATTACHMENT:
        dbg.nospace() << "AVMEDIA_TYPE_ATTACHMENT";
        break;
    case AVMEDIA_TYPE_NB:
        dbg.nospace() << "AVMEDIA_TYPE_NB";
        break;
    }
    return dbg.maybeSpace();
}


QDebug operator<<(QDebug dbg, AVRational r)
{
    dbg.nospace() << r.num << "/" << r.den;
    return dbg.maybeSpace();
}
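
// Quick demo of the two overloads above (a sketch, not used by the class):
// prints "time_base: 1/90000" and "codec_type: AVMEDIA_TYPE_VIDEO".
static void debugOperatorDemo()
{
    AVRational tb = {1, 90000};
    qDebug() << "time_base:" << tb;
    qDebug() << "codec_type:" << AVMEDIA_TYPE_VIDEO;
}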

QlyAVFormatContext::QlyAVFormatContext()
    : pFormatCtx(nullptr),
      errorcode(0),
      isOutput(false),
      m_rounding((AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX))
{
    //av_register_all();      // no longer needed since FFmpeg 4.0
    //avformat_network_init();
}

void QlyAVFormatContext::dumpFormat()
{
    av_dump_format(pFormatCtx, 0, m_url.toLocal8Bit().constData(), isOutput);
}

QlyAVFormatContext::~QlyAVFormatContext()
{
    if(isOutput && pFormatCtx)
    {
        closeOutputFile();
    }
    else if(pFormatCtx)
    {
        avformat_close_input(&pFormatCtx);
    }
    avformat_free_context(pFormatCtx);
}

int QlyAVFormatContext::writeTrailer()
{
    errorcode = av_write_trailer(pFormatCtx);
    msg("QlyAVFormatContext::writeTrailer av_write_trailer", errorcode);
    return errorcode;
}

int QlyAVFormatContext::writeHeader()
{
    qDebug() << "in QlyAVFormatContext::writeHeader()";
    // AVFMT_NOFILE is a flag of the output format, not of the context
    if (!(pFormatCtx->oformat->flags & AVFMT_NOFILE))
    {
        errorcode = avio_open(&pFormatCtx->pb, m_url.toLocal8Bit().constData(), AVIO_FLAG_WRITE);
        msg("QlyAVFormatContext::writeHeader avio_open", errorcode);
    }
    errorcode = avformat_write_header(pFormatCtx, nullptr);
    msg("QlyAVFormatContext::writeHeader avformat_write_header", errorcode);

    return errorcode;
}

int QlyAVFormatContext::outStreamIndex(QlyAVPacket &pack)
{
    unsigned int index = pack.m_packet.stream_index;
    if(!streamMap.contains(index)) return -1;
    return streamMap[index];
}

bool QlyAVFormatContext::readFrame(QlyAVPacket &pack, QSet<AVMediaType> type)
{
    if(!pFormatCtx)
    {
        errorcode = 0;
        return false;
    }
    AVPacket * pPacket = pack.ptr();
    while(1)
    {
        errorcode = av_read_frame(pFormatCtx, pPacket);
        if(errorcode < 0) return false;
        AVStream *in_stream = pFormatCtx->streams[pPacket->stream_index];
        AVCodecParameters *in_codecpar = in_stream->codecpar;

        if (!type.contains(in_codecpar->codec_type))
        {
            av_packet_unref(pPacket);
            continue;
        }
        return true;
    }
}

bool QlyAVFormatContext::readFrame(QlyAVPacket &pack, int streamIndex)
{
    if(!pFormatCtx)
    {
        errorcode = 0;
        return false;
    }
    AVPacket * pPacket = pack.ptr();
    while(1)
    {
        errorcode = av_read_frame(pFormatCtx, pPacket);
        qDebug() << "av_read_frame:errorcode = " << errorcode;
        if(errorcode < 0) return false;
        if (streamIndex != -1 && pPacket->stream_index != streamIndex)
        {
            av_packet_unref(pPacket);
            continue;
        }
        qDebug() << "find a Packet";
        return true;
    }
}

QString QlyAVFormatContext::license()
{
    return QString(avformat_license());
}

QMap<QString, QString> QlyAVFormatContext::metadata()
{
    QMap<QString, QString> data;
    AVDictionaryEntry *tag = nullptr;
    while ((tag = av_dict_get(pFormatCtx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
    {
        data.insert(QString(tag->key), QString(tag->value));
    }
    return data;
}
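
// Usage sketch (not part of the original class): print every container-level
// metadata tag returned by metadata(), e.g. "title" or "encoder".
static void printMetadata(QlyAVFormatContext &fmt)
{
    const QMap<QString, QString> tags = fmt.metadata();
    for (auto it = tags.constBegin(); it != tags.constEnd(); ++it)
    {
        qDebug() << it.key() << "=" << it.value();
    }
}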

int QlyAVFormatContext::createFile(QString formatName, QString fileName)
{
    m_url = fileName;
    isOutput = true;
    if(pFormatCtx) avformat_free_context(pFormatCtx);
    //pFormatCtx = avformat_alloc_context();
    if(formatName.isNull() || formatName.isEmpty())
    {
        errorcode = avformat_alloc_output_context2(&pFormatCtx, nullptr,
                                                   nullptr,
                                                   fileName.toLocal8Bit().constData());
    }
    else
    {
        errorcode = avformat_alloc_output_context2(&pFormatCtx, nullptr,
                                                   formatName.toLocal8Bit().constData(),
                                                   fileName.toLocal8Bit().constData());
    }

    msg("QlyAVFormatContext::createFile avformat_alloc_output_context2", errorcode);
    return errorcode;
}

int QlyAVFormatContext::closeOutputFile()
{
    /* close the output AVIOContext, but only if the muxer actually opened a file */
    if (pFormatCtx && !(pFormatCtx->oformat->flags & AVFMT_NOFILE))
    {
        errorcode = avio_closep(&pFormatCtx->pb);
    }
    return errorcode;
}

QVector<QlyAVStream> QlyAVFormatContext::allStreams()
{
    QVector<QlyAVStream> list;
    for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++)
    {
        AVStream *in_stream = pFormatCtx->streams[i];
        list << QlyAVStream(in_stream, i, -1);
    }
    return list;
}

bool QlyAVFormatContext::writeFrame(QlyAVPacket &pack, AVRational timeBase, bool useStreamMap)
{
    AVPacket *pPacket = pack.ptr();
    if(useStreamMap && streamMap.contains(pPacket->stream_index))
    {
        pPacket->stream_index = streamMap[pPacket->stream_index];
    }
    AVRational out_timebase = pFormatCtx->streams[pPacket->stream_index]->time_base;

    pPacket->pts = av_rescale_q_rnd(pPacket->pts, timeBase, out_timebase, m_rounding);
    pPacket->dts = av_rescale_q_rnd(pPacket->dts, timeBase, out_timebase, m_rounding);
    pPacket->duration = av_rescale_q(pPacket->duration, timeBase, out_timebase);
    pPacket->pos = -1;

    errorcode = av_interleaved_write_frame(pFormatCtx, pPacket);
    //msg("QlyAVFormatContext::writeFrame av_interleaved_write_frame", errorcode);
    return (errorcode == 0);
}
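
// Worked example (sketch) of the rescaling done in writeFrame(): a pts of 250
// in a 1/25 time base is 10 seconds, which becomes 10000 in a 1/1000 time base.
static void rescaleExample()
{
    const AVRational in_tb  = {1, 25};
    const AVRational out_tb = {1, 1000};
    const int64_t pts = av_rescale_q_rnd(250, in_tb, out_tb,
                            (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    Q_ASSERT(pts == 10000);
}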

QVector<QlyAVStream> QlyAVFormatContext::findStreams(QSet<AVMediaType> mediaType)
{
    QVector<QlyAVStream> list;
    for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++)
    {
        AVStream *in_stream = pFormatCtx->streams[i];
        AVCodecParameters *in_codecpar = in_stream->codecpar;
        if(mediaType.contains(in_codecpar->codec_type))
        {
            qDebug() << "in_stream->index = " << in_stream->index;
            list << QlyAVStream(in_stream, i, -1);
        }
    }
    return list;
}

void QlyAVFormatContext::setStreams(QVector<QlyAVStream> streams)
{
    streamMap.clear();
    unsigned int stream_index = 0;
    QVectorIterator<QlyAVStream> iter(streams);
    while(iter.hasNext())
    {
        QlyAVStream st = iter.next();
        AVStream * out_stream = avformat_new_stream(pFormatCtx, nullptr);
        if(!out_stream)
        {
            qWarning() << "QlyAVFormatContext::setStreams: avformat_new_stream failed";
            continue;
        }
        avcodec_parameters_copy(out_stream->codecpar, st.codecpar());
        out_stream->codecpar->codec_tag = 0; // let the output muxer choose a suitable tag
        streamMap[st.m_inStreamIndex] = stream_index;
        qDebug() << "streamMap[" << st.m_inStreamIndex << "] = " << stream_index;
        stream_index ++;
    }
    qDebug() << "QlyAVFormatContext::setStreams: added" << stream_index << "stream(s)";
}

AVStream * QlyAVFormatContext::rawStream(int i)
{
    if(pFormatCtx && i >= 0 && i < (int)pFormatCtx->nb_streams) return pFormatCtx->streams[i];
    return nullptr;
}

int QlyAVFormatContext::streamCount() const
{
    if(pFormatCtx) return pFormatCtx->nb_streams;
    return 0;
}

QString QlyAVFormatContext::version()
{
    // avformat_version() returns a packed integer, not a string
    unsigned int v = avformat_version();
    return QString("%1.%2.%3").arg(AV_VERSION_MAJOR(v)).arg(AV_VERSION_MINOR(v)).arg(AV_VERSION_MICRO(v));
}

QString QlyAVFormatContext::configuration()
{
    return QString(avformat_configuration());
}

void QlyAVFormatContext::closeInputFile()
{
    if(pFormatCtx)
        avformat_close_input(&pFormatCtx);
}

int QlyAVFormatContext::openFile(QString url)
{
    m_url = url;
    isOutput = false;
    if(pFormatCtx) avformat_free_context(pFormatCtx);
    pFormatCtx = avformat_alloc_context();
    errorcode = avformat_open_input(&pFormatCtx, url.toLocal8Bit().constData(), nullptr, nullptr);
    if (errorcode < 0) return errorcode;
    errorcode = avformat_find_stream_info(pFormatCtx, nullptr);
    return errorcode;
}


QString QlyAVFormatContext::errorString() const
{
    char err[512];
    av_strerror(errorcode, err, sizeof(err));
    return QString(err);
}
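
To wrap up, a small usage sketch (not from the original listing) showing how errorString() can report a failed openFile():

#include <QDebug>
#include "QlyAVFormatContext.h"

// Sketch: open an input file and report any failure via errorString().
static bool openOrReport(QlyAVFormatContext &in, const QString &path)
{
    if (in.openFile(path) < 0)
    {
        qWarning() << "failed to open" << path << ":" << in.errorString();
        return false;
    }
    return true;
}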

With that, the code is complete. I will keep adding new features on top of it in later posts, so please follow my upcoming blogs.

Originally published at blog.csdn.net/liyuanbhu/article/details/121744275