音视频学习 - QT6.3.1创建QAudioSink+ ffmpeg项目进行音频解析及播放

一、前言

        在之前的文章里创建了项目、引入ffmpeg、进行视频解析并绘制在自定义QWidget。

上一篇文章《音视频学习 - 创建QT + ffmpeg项目进行视频解析》(坐望云起的博客,https://skydance.blog.csdn.net/article/details/126675271)介绍了自定义控件的做法:在 mainwindow.ui 的设计界面,拖一个 Widget 到主界面,然后在 Widget 上点击右键,选择"提升为",在提升的类名称处输入自定义控件的类名;如果勾选下面的"全局包含",就不用再单独包含头文件了。当时声音不知道如何处理,最近又翻文档,基于 QAudioSink 先实现最简单的音频解析和播放。虽然遗留问题还不少,但还是先整理记录一下,否则很快就会全部忘掉。

二、mainwindow.cpp

        构造函数先创建QAudioFormat、QAudioSink。

MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
    , ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    // --- Audio output setup ---
    // Fixed output format: 48 kHz, stereo, signed 16-bit interleaved
    // samples. The decode thread converts every decoded audio frame to
    // this layout before handing the bytes to getAudioFromFrame().
    QAudioFormat format;
    format.setSampleRate(48000);
    format.setChannelCount(2);
    format.setSampleFormat(QAudioFormat::Int16);

    QAudioDevice info = QMediaDevices::defaultAudioOutput();
    if (!info.isFormatSupported(format)) {
        // NOTE: when we bail out here, `audio` and `io` are never set up;
        // every later use of them must tolerate that (see getAudioFromFrame).
        qWarning() << "Raw audio format not supported by backend, cannot play audio.";
        return;
    }

    audio = new QAudioSink(info, format, this);
    // Bug fix: the original called audio->stop() on a sink that had never
    // been started — a pointless no-op. start() alone is sufficient; it
    // returns the QIODevice that PCM data is pushed into.
    io = audio->start();
}

/**
 * @brief MainWindow::getAudioFromFrame
 * Push raw PCM bytes into the audio sink's IO device.
 *
 * Bug fix: the constructor returns early (leaving `io` unset) when the
 * backend rejects the requested format, so writing blindly would
 * dereference an invalid pointer. Guard against that and against
 * nonsense arguments before writing.
 * NOTE(review): assumes `io` is initialized to nullptr in the class
 * declaration — confirm in mainwindow.h.
 *
 * @param buffer interleaved S16 stereo PCM data
 * @param size   number of bytes in @p buffer
 */
void MainWindow::getAudioFromFrame(uint8_t* buffer, int size)
{
    if (!io || !buffer || size <= 0)
        return;
    io->write(reinterpret_cast<const char *>(buffer), size);
}

/**
 * @brief MainWindow::on_pushButton_2_clicked
 * Playback button: spawns the decode thread and wires its signals
 * (video frames and converted PCM audio) back to this window.
 */
void MainWindow::on_pushButton_2_clicked()
{
    // Bug fix: the original allocated a new MyThread (and re-connected
    // the signals) on every click, leaking the previous — possibly still
    // running — thread. Ignore clicks while a thread is already active.
    // NOTE(review): assumes `m_thread` is initialized to nullptr in the
    // class declaration — confirm in mainwindow.h.
    if (m_thread && m_thread->isRunning())
        return;

    m_thread = new MyThread;
    m_thread->audio = audio;

    connect(m_thread, &MyThread::getPicFromFrame, this, &MainWindow::getPicfromThread);
    connect(m_thread, &MyThread::getAudioFromFrame, this, &MainWindow::getAudioFromFrame);
    m_thread->start();
}

/**
 * @brief MainWindow::on_pushButton_2_clicked
 * Playback button.
 *
 * NOTE(review): this is an exact duplicate of the definition above —
 * two definitions of the same slot in one translation unit will not
 * compile (ODR violation). Almost certainly a copy/paste artifact of
 * the article; one of the two copies must be removed.
 */
void MainWindow::on_pushButton_2_clicked()
{
    // Start the decode thread
    m_thread  =  new MyThread;
    m_thread->audio = audio;
    connect(m_thread, &MyThread::getPicFromFrame,this,&MainWindow::getPicfromThread);
    connect(m_thread, &MyThread::getAudioFromFrame,this,&MainWindow::getAudioFromFrame);
    m_thread->start();
}

三、MyThread.cpp文件

        代码还比较乱;而且对音频参数也不是很了解,下面这组能正常出声的参数是一步步试出来的。

#include <MyThread.h>


#define INBUF_SIZE 4096

// Default constructor — intentionally empty; all decoding state is set
// up inside run() when the thread starts.
MyThread::MyThread()
{

}

void MyThread::run()
{
    /**
     * @brief filename 输入的视频文件
     */
    const char* filename;
    /**
     * @brief outfilename 用于转码后的输出文件(暂时没用到)
     */
    const char* outfilename;
    /**
     * @brief codecForVideo 视频编解码器
     */
    const AVCodec* codecForVideo;
    /**
     * @brief codecForAudio 音频编解码器
     */
    const AVCodec* codecForAudio;
    /**
     * @brief parserForVideo 视频解析器
     */
    AVCodecParserContext* parserForVideo;
    /**
     * @brief parserForAudio 音频解析器
     */
    AVCodecParserContext* parserForAudio;
    /**
     * @brief c_ForVideo
     */
    AVCodecContext* c_ForVideo = NULL;
    /**
     * @brief c_ForAudio
     */
    AVCodecContext* c_ForAudio = NULL;
    /**
     * @brief codecpar_ForVideo
     */
    AVCodecParameters* codecpar_ForVideo = NULL;
    /**
     * @brief codecpar_ForAudio
     */
    AVCodecParameters* codecpar_ForAudio = NULL;
    /**
     * @brief frame_ForVideo 视频帧
     */
    AVFrame* frame_ForVideo;
    /**
     * @brief pFrameBGR 视频帧解析出的BGR
     */
    AVFrame* pFrameBGR;
    /**
     * @brief frame_ForAudio 音频帧
     */
    AVFrame* frame_ForAudio;


    SwsContext* sws_ctx_ForVideo;

    SwrContext* sws_ctx_ForAudio = swr_alloc();

    uint8_t* buffer_ForVideo = nullptr;

    uint8_t* buffer_ForAudio = nullptr;
    FILE* f;

    uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    uint8_t* data;
    size_t   data_size;
    int ret;
    AVPacket* pkt;
    AVFormatContext* inFmtCtx = NULL;
    int video_in_stream_index = -1, audio_in_stream_index = -1;
    AVCodecID src_video_id = AVCodecID::AV_CODEC_ID_NONE, src_audio_id = AVCodecID::AV_CODEC_ID_NONE;

    AVStream* stream_ForVideo;
    AVStream* stream_ForAudio;
    int fps1=30, fps2, fps3, fps4;



    //输入视频文件路径
    filename = "D:\\1.mp4";
    //输出视频文件路径(如果转码)
    outfilename = "";

    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);


    // 打开输入文件
    if ((ret = avformat_open_input(&inFmtCtx, filename, NULL, NULL)) < 0) {
        return;
    }
    if ((ret = avformat_find_stream_info(inFmtCtx, NULL)) < 0) {
        return;
    }
    // 输出输入文件信息
    av_dump_format(inFmtCtx, 0, filename, 0);

    for (int i = 0; i < inFmtCtx->nb_streams; i++) {
        AVCodecParameters* codecpar = inFmtCtx->streams[i]->codecpar;
        if (codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_in_stream_index == -1) {
            src_video_id = codecpar->codec_id;
            video_in_stream_index = i;
        }
        if (codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_in_stream_index == -1) {
            src_audio_id = codecpar->codec_id;
            audio_in_stream_index = i;
        }
    }


    /* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */
    memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);


    codecForVideo = avcodec_find_decoder(src_video_id);
    codecForAudio = avcodec_find_decoder(src_audio_id);
    if (!codecForVideo || !codecForAudio) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }


    parserForVideo = av_parser_init(codecForVideo->id);
    parserForAudio = av_parser_init(codecForAudio->id);
    if (!parserForVideo || !parserForAudio) {
        fprintf(stderr, "parser not found\n");
        exit(1);
    }

    c_ForVideo = avcodec_alloc_context3(codecForVideo);
    c_ForAudio = avcodec_alloc_context3(codecForAudio);
    if (!c_ForVideo || !c_ForAudio) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    codecpar_ForVideo = inFmtCtx->streams[video_in_stream_index]->codecpar;
    if ((ret = avcodec_parameters_to_context(c_ForVideo, codecpar_ForVideo)) < 0) {
        return;
    }

    codecpar_ForAudio = inFmtCtx->streams[audio_in_stream_index]->codecpar;
    if ((ret = avcodec_parameters_to_context(c_ForAudio, codecpar_ForAudio)) < 0) {
        return;
    }

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream.
       对于某些编解码器,例如 msmpeg4 和 mpeg4,必须在此处初始化宽度和高度,因为此信息在比特流中不可用。
    */
    /* open it */
    if (avcodec_open2(c_ForVideo, codecForVideo, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    if (avcodec_open2(c_ForAudio, codecForAudio, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }


    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame_ForVideo = av_frame_alloc();
    pFrameBGR = av_frame_alloc();
    frame_ForAudio = av_frame_alloc();

    if (!frame_ForVideo || !frame_ForAudio) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    // 初始化
    swr_alloc_set_opts(sws_ctx_ForAudio, c_ForAudio->channel_layout, AV_SAMPLE_FMT_S16, c_ForAudio->sample_rate, c_ForAudio->channel_layout, c_ForAudio->sample_fmt, c_ForAudio->sample_rate, 0, NULL);
    swr_init(sws_ctx_ForAudio);


    int64_t startTime_ForVideo, startTime_ForAudio;
    bool video_start = false, audio_start = false;

    while (av_read_frame(inFmtCtx, pkt) >= 0) {

        // 迭代结束后释放 av_read_frame 分配的 packet 内存
        //std::shared_ptr<AVPacket> packetDeleter(&pkt, av_packet_unref);

        // 说明读取的视频数据
        if (pkt->stream_index == video_in_stream_index)
        {
            if ((ret = avcodec_send_packet(c_ForVideo, pkt)) < 0) {
                //LOGD("video avcodec_send_packet fail %s", av_err2str(ret));
                //releaseSources();
                return;
            }
            while (true) {
                // 从解码缓冲区接收解码后的数据
                if ((ret = avcodec_receive_frame(c_ForVideo, frame_ForVideo)) < 0) {
                    if (ret == AVERROR_EOF) {
                        exit(1);
                        // 解码缓冲区结束了,那么也要flush编码缓冲区
                        //doEncodeVideo(NULL);
                    }
                    break;
                }

                if(!video_start)
                {
                    startTime_ForVideo = av_gettime();
                    video_start = true;
                }

                int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, c_ForVideo->width, c_ForVideo->height, 1);
                if(buffer_ForVideo == nullptr) buffer_ForVideo = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t));
                av_image_fill_arrays(pFrameBGR->data, pFrameBGR->linesize, buffer_ForVideo, AV_PIX_FMT_RGB24,  c_ForVideo->width, c_ForVideo->height, 1);


                sws_ctx_ForVideo = sws_getContext(codecpar_ForVideo->width, codecpar_ForVideo->height, (enum AVPixelFormat)codecpar_ForVideo->format,
                    frame_ForVideo->width, frame_ForVideo->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

                // 图像转换
                sws_scale(sws_ctx_ForVideo, frame_ForVideo->data, frame_ForVideo->linesize, 0, c_ForVideo->height, pFrameBGR->data, pFrameBGR->linesize);

                QImage tempImage((uchar*)pFrameBGR->data[0], c_ForVideo->width, c_ForVideo->height, QImage::Format_RGB888);
                getPicFromFrame(tempImage);
                //msleep(1000/fps1);//1000/fps1

                AVRational time_base = inFmtCtx->streams[audio_in_stream_index]->time_base;
                AVRational time_base_q = {1, AV_TIME_BASE};
                int64_t pts_time = av_rescale_q(pkt->pts, time_base, time_base_q);
                int64_t now_time = av_gettime() - startTime_ForVideo;
                int64_t offset_time = pts_time - now_time;
                if (offset_time > 0)
                    av_usleep(offset_time);
            }
        }

        //处理音频
        double sleep_time=0;
        if (pkt->stream_index == audio_in_stream_index)
        {
            if ((ret = avcodec_send_packet(c_ForAudio, pkt)) < 0) {
                return;
            }

            while (true) {
                // 从解码缓冲区接收解码后的数据
                if ((ret = avcodec_receive_frame(c_ForAudio, frame_ForAudio)) < 0) {
                    if (ret == AVERROR_EOF) {
                        exit(1);
                        // 解码缓冲区结束了,那么也要flush编码缓冲区
                        //doEncodeVideo(NULL);
                    }
                    break;
                }

                if(!audio_start)
                {
                    startTime_ForAudio = av_gettime();
                    audio_start = true;
                }

                //qDebug()<<"码率:"<<c_ForAudio->bit_rate;
                //qDebug()<<"格式:"<<c_ForAudio->sample_fmt;
                //qDebug()<<"通道:"<<c_ForAudio->channels;
                //qDebug()<<"采样率:"<<c_ForAudio->sample_rate;
                //qDebug()<<"what:"<<frame_ForAudio->nb_samples;

                //int len = swr_convert(sws_ctx_ForAudio, &audio_out_buffer, MAX_AUDIO_FRAME_SIZE*2, (const uint8_t**)frame_ForAudio->data, frame_ForAudio->nb_samples);

                //if(buffer_ForAudio == nullptr) buffer_ForAudio = (uint8_t*)av_malloc(bufsize * sizeof(uint8_t));
                int bufsize = av_samples_get_buffer_size(nullptr, frame_ForAudio->channels, frame_ForAudio->nb_samples, AV_SAMPLE_FMT_S16, 0);
                uint8_t *buf = new uint8_t[bufsize * 2];

                swr_convert(sws_ctx_ForAudio, &buf, frame_ForAudio->nb_samples, (const uint8_t**)(frame_ForAudio->data), frame_ForAudio->nb_samples);

                sleep_time = (48000 * 16 * 2 / 8) / bufsize;
                if(audio->bytesFree()<bufsize){
                    msleep(sleep_time);
                    getAudioFromFrame(buf, bufsize);
                }else {
                    getAudioFromFrame(buf, bufsize);
                }

                AVRational time_base = inFmtCtx->streams[audio_in_stream_index]->time_base;
                AVRational time_base_q = {1, AV_TIME_BASE};
                int64_t pts_time = av_rescale_q(pkt->pts, time_base, time_base_q);
                int64_t now_time = av_gettime() - startTime_ForAudio;
                int64_t offset_time = pts_time - now_time;
                if (offset_time > 0)
                    av_usleep(offset_time);

                //io->write((const char *)buf, bufsize);
                delete[] buf;
            }
        }

        // 因为每一次读取的AVpacket的数据大小不一样,所以用完之后要释放
        av_packet_unref(pkt);
    }

    fclose(f);

    av_parser_close(parserForVideo);
    avcodec_free_context(&c_ForVideo);
    av_frame_free(&frame_ForVideo);

    av_parser_close(parserForAudio);
    avcodec_free_context(&c_ForAudio);
    av_frame_free(&frame_ForAudio);

    av_packet_free(&pkt);
}

四、问题

        音频、视频单独播放还凑合,一起播放就会出现断续,需要进一步研究音视频同步的问题;目前还不是很懂,有时间继续完善。

猜你喜欢

转载自blog.csdn.net/bashendixie5/article/details/126789766