FFMPEG audio and video development: development of local video player (single-threaded decoding)

1. Environmental introduction

Operating system introduction: win10 64 bit

FFMPEG version: 4.4.2

QT version: 5.12.6

2. FFMPEG and X264 compilation and download

The source code of ffmpeg and x264 is open source and can be compiled on any platform. Download the source code and compile the library file by yourself.

CSDN download address: (used by windows system)  https://download.csdn.net/download/xiaolong1126626497/13328939

3. Source code introduction

Version v1:
1. A total of 2 threads are used in the program. Thread 1 is the main UI thread, responsible for refreshing the image data of the main interface; a label control is used for image display. Thread 2 is the video decoding thread, responsible for decoding the audio and video data and then sending each video picture through a signal to the main thread for display. In the image display function of the main interface, the current size of the label control is obtained and the image is automatically scaled to fit it.
2. Audio data is played directly in the video decoding thread.
3. Added total-time display and current-time display.
4. Added an arbitrary seek (jump) function.
5. Optimized the playback progress bar display.
6. Optimized the automatic scaling of the player label, which scales automatically with the window size.

  Note: Because video decoding/conversion and audio decoding/playback are all done in a single thread, playback may stutter if the video resolution is too high; videos at 720P and below play without problems. A multi-threaded version will be added later.

 The video files used during development and testing are all in MP4 format, and MP4 playback works normally. Other formats have not been tested, as no video files in other formats were available on the development machine.

4. Player running effect

 

5. Source code examples

5.1 widget.h file source code

#ifndef WIDGET_H
#define WIDGET_H

#include <QWidget>
#include "video_play.h"
#include <QFileDialog>
#include "config.h"
#include <QListWidgetItem>
#include <QDesktopWidget>

QT_BEGIN_NAMESPACE
namespace Ui { class Widget; }
QT_END_NAMESPACE

//Main (UI) thread widget: owns the player window and the decode thread.
class Widget : public QWidget
{
    Q_OBJECT

public:
    Widget(QWidget *parent = nullptr);
    ~Widget();
    //Apply a QSS style sheet file to the whole application.
    void SetStyle(const QString &qssFile);
    //Append text to the given log widget and scroll it to the bottom.
    void Log_Text_Display(QPlainTextEdit *plainTextEdit_log,QString text);

    bool max_flag=false; //window-maximized flag
    /* video/audio decode thread of the player */
    class Thread_FFMPEG_LaLiu thread_laliu;
private slots:
    //Slot: current playback time, in seconds (from the decode thread).
    void getCurrentTime(qint64 Sec);
    //Slot: total media duration, in microseconds (from the decode thread).
    void GetSumTime(qint64 uSec);
    //Slot: log text from the decode thread.
    void Log_Display(QString text);
    //Slot: decoded video frame ready for display.
    void VideoDataDisplay(QImage image);

    void on_toolButton_Refresh_clicked();

    void on_toolButton_Start_Play_clicked(bool checked);

protected:
    void resizeEvent(QResizeEvent *event); //window resize event
    void closeEvent(QCloseEvent *event); //window close event
    bool eventFilter(QObject *obj, QEvent *event); //click-to-seek on the slider
private:
    Ui::Widget *ui;
};

#endif // WIDGET_H

5.2 widget.cpp source code

#include "widget.h"
#include "ui_widget.h"
#include <QDebug>

/*
 * 设置QT界面的样式
*/
void Widget::SetStyle(const QString &qssFile) {
    QFile file(qssFile);
    if (file.open(QFile::ReadOnly)) {
        QString qss = QLatin1String(file.readAll());
        qApp->setStyleSheet(qss);
        QString PaletteColor = qss.mid(20,7);
        qApp->setPalette(QPalette(QColor(PaletteColor)));
        file.close();
    }
    else
    {
        qApp->setStyleSheet("");
    }
}

//Constructor: set up the UI, connect the decode-thread signals, and select
//the system default audio output device.
Widget::Widget(QWidget *parent)
    : QWidget(parent)
    , ui(new Ui::Widget)
{
    ui->setupUi(this);

    /* basic setup */
    this->SetStyle(":/images/blue.css");     //apply the style sheet
    this->setWindowIcon(QIcon(":/log.ico")); //window icon
    this->setWindowTitle("视频播放器");
    //filter mouse events on the progress slider (click-to-seek, see eventFilter)
    ui->horizontalSlider_2->installEventFilter(this);
    //image output signal of the decode thread
    connect(&thread_laliu,SIGNAL(VideoDataOutput(QImage )),this,SLOT(VideoDataDisplay(QImage )));
    //log messages of the decode thread
    connect(&thread_laliu,SIGNAL(LogSend(QString)),this,SLOT(Log_Display(QString)));

    //current playback time (seconds)
    connect(&thread_laliu,SIGNAL(sig_getCurrentTime(qint64)),this,SLOT(getCurrentTime(qint64)));

    //total media duration (microseconds)
    connect(&thread_laliu,SIGNAL(sig_GetSumTime(qint64)),this,SLOT(GetSumTime(qint64)));

    //use the system default audio output device
    audio_output_config.audio=QAudioDeviceInfo::defaultOutputDevice();
    qDebug()<<"系统默认声卡:"<<audio_output_config.audio.deviceName();
}

//Destructor: release the UI. The decode thread is stopped in closeEvent().
Widget::~Widget()
{
    delete ui;
}


//Slot: refresh the video frame shown on the main window label.
void Widget::VideoDataDisplay(QImage image)
{
    // Center the frame inside the label both horizontally and vertically.
    ui->label_ImageDisplay->setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);
    ui->label_ImageDisplay->setPixmap(QPixmap::fromImage(image));
}

/* Append text to the given log widget and keep the newest line visible. */
void Widget::Log_Text_Display(QPlainTextEdit *plainTextEdit_log,QString text)
{
    plainTextEdit_log->insertPlainText(text);
    // Auto-scroll: push the vertical scroll bar all the way down.
    QScrollBar *bar = plainTextEdit_log->verticalScrollBar();
    if (bar != nullptr)
    {
        bar->setSliderPosition(bar->maximum());
    }
}

//Slot: receive log text from the decode thread.
//Currently routed to qDebug only; the on-screen log widget is disabled.
void Widget::Log_Display(QString text)
{
    //Log_Text_Display(ui->plainTextEdit_log,text);
    qDebug()<<text;
}

void Widget::on_toolButton_Refresh_clicked()
{
    QString filename=QFileDialog::getOpenFileName(this,"选择播放的视频","D:/",tr("*.mp4 *.wmv *.*"));
    strncpy(video_audio_decode.rtmp_url,filename.toUtf8().data(),sizeof(video_audio_decode.rtmp_url));

    //判断线程是否正在运行
    if(thread_laliu.isRunning())
    {
        video_audio_decode.run_flag=0;
        thread_laliu.quit();
        thread_laliu.wait();
    }
    //开始运行线程
    video_audio_decode.run_flag=1; //运行标志
    thread_laliu.start();
    ui->toolButton_Start_Play->setText("停止播放");
}

//Play/pause toggle button. Note: when the button is checked the player is
//PAUSED (run_flag=2); when unchecked it resumes (run_flag=1).
void Widget::on_toolButton_Start_Play_clicked(bool checked)
{

    if(checked)  //toggled on: pause playback
    {
        video_audio_decode.run_flag=2; //2 = paused
        ui->toolButton_Start_Play->setText("继续播放");
    }
     else //toggled off: resume playback
    {
        video_audio_decode.run_flag=1; //1 = running
        ui->toolButton_Start_Play->setText("暂停播放");
    }
}

/*
 * Slot: receives the total media duration in microseconds, sets the progress
 * bar range (one tick per second) and shows the duration as mm:ss.
 */
void Widget::GetSumTime(qint64 uSec)
{
    qint64 Sec = uSec/1000000; //microseconds -> whole seconds

    //progress bar: one tick per second
    ui->horizontalSlider_2->setRange(0,Sec);

    //BUG FIX: the old QString("00%1")+right(2) trick truncated the minute
    //field and displayed wrong values for media longer than 99 minutes;
    //use arg() with a zero-padded field width instead.
    QString str = QString("%1:%2")
            .arg(Sec/60, 2, 10, QLatin1Char('0'))
            .arg(Sec%60, 2, 10, QLatin1Char('0'));
    ui->label_SumTime->setText(str);
}

/*
 * Slot: receives the current audio playback time in seconds, moves the
 * progress slider and shows the time as mm:ss.
 */
void Widget::getCurrentTime(qint64 Sec)
{
    ui->horizontalSlider_2->setValue(Sec);
    //BUG FIX: the old QString("00%1")+right(2) trick truncated the minute
    //field and displayed wrong values past 99 minutes; use a zero-padded
    //field width instead.
    QString str = QString("%1:%2")
            .arg(Sec/60, 2, 10, QLatin1Char('0'))
            .arg(Sec%60, 2, 10, QLatin1Char('0'));
    ui->label_CurrentTime->setText(str);
}

//Window resize event: resize the video label to fill the window above the
//slider and button row, and publish the new label size to the decode thread
//so frames are scaled to fit.
void Widget::resizeEvent(QResizeEvent *event)
{
    Q_UNUSED(event); //geometry is read from the widget itself, not the event
    //leave room for the slider (x3 for margins) and the button row
    int height=this->geometry().height()-ui->horizontalSlider_2->height()*3-ui->toolButton_Refresh->height();
    ui->label_ImageDisplay->setGeometry(0,0,this->width(),height);

    //publish the display-label size for the decode thread's scaling
    video_audio_decode.label_size=ui->label_ImageDisplay->size();
}

//Window close event: ask for confirmation, then stop the decode thread
//before letting the window close.
void Widget::closeEvent(QCloseEvent *event)
{
    int ret = QMessageBox::question(this, tr("视频播放器"),
    tr("是否需要退出程序?"),QMessageBox::Yes | QMessageBox::No);

    if(ret==QMessageBox::Yes)
    {
        //signal the decode loop to stop, then wait for the thread to finish
        video_audio_decode.run_flag=0;
        thread_laliu.quit();
        thread_laliu.wait();
        event->accept();
    }
    else
    {
        event->ignore();
    }
    /*
    accept() lets the close event through so the window closes;
    ignore() discards it and keeps the window open. Every branch must
    handle the event explicitly so no case is missed.
    */
}

//Event filter on the progress slider: makes a left-click jump the slider to
//the clicked position (QSlider does not do this by default) and requests a
//seek in the decode thread.
bool Widget::eventFilter(QObject *obj, QEvent *event)
{
    if(obj==ui->horizontalSlider_2)
    {
        if (event->type()==QEvent::MouseButtonPress)
        {
            QMouseEvent *mouseEvent = static_cast<QMouseEvent *>(event);
            if (mouseEvent->button() == Qt::LeftButton)
            {
               //map the click x-position to a slider value
               int value = QStyle::sliderValueFromPosition(ui->horizontalSlider_2->minimum(), ui->horizontalSlider_2->maximum(), mouseEvent->pos().x(), ui->horizontalSlider_2->width());
               ui->horizontalSlider_2->setValue(value);

               //BUG FIX: value*1000000 was evaluated in 32-bit int and
               //overflowed for seek positions beyond ~2147 s (~35.8 min);
               //widen to 64-bit before the multiplication.
               video_audio_decode.seek_pos = static_cast<qint64>(value) * 1000000; //seconds -> microseconds
               video_audio_decode.seek_flag=1;
            }
        }
    }
    return QObject::eventFilter(obj,event);
}

5.3 video_play.h source code

#ifndef VIDEO_PLAY_H
#define VIDEO_PLAY_H
#include "config.h"

//Video/audio decoding thread: demuxes and decodes the media file, plays the
//audio itself and hands decoded video frames to the UI thread via signals.
class Thread_FFMPEG_LaLiu: public QThread
{
    Q_OBJECT
public:
      QAudioOutput *audio_out;       //Qt audio output device
      QIODevice* audio_out_streamIn; //stream the resampled PCM is written to
      Thread_FFMPEG_LaLiu()
      {
          audio_out=nullptr;
          audio_out_streamIn=nullptr;
      }
      //Initialise the audio output device (mono, 44.1 kHz, signed 16-bit PCM).
      void Audio_Out_Init();
      //Open, decode and play the media; returns when stopped or at EOF.
      int ffmpeg_rtmp_client();
protected:
    void run(); //thread entry point
signals:
    void sig_GetSumTime(qint64 uSec);    //total duration, microseconds
    void sig_getCurrentTime(qint64 Sec); //current play time, seconds
    void LogSend(QString text);          //log output
    void VideoDataOutput(QImage);        //decoded frame for the UI thread
};

//Global state shared between the UI thread and the decode thread.
//BUG FIX: the members were previously left uninitialized, yet run_flag and
//seek_flag are read (e.g. by interrupt_cb) before the thread is started;
//in-class initializers give them defined values.
class VideoAudioDecode
{
public:
    char rtmp_url[1024] = {0};  //path/URL of the media to play (UTF-8, NUL-terminated)
    char run_flag = 0;          //2 = paused, 1 = running, 0 = stopped
    bool seek_flag = false;     //true = a seek to seek_pos has been requested
    quint64 seek_pos = 0;       //seek target, in microseconds
    QSize label_size;           //current size of the video display label
};
extern class VideoAudioDecode video_audio_decode;
#endif // VIDEO_PLAY_H

5.4 video_play.cpp source code

#include "video_play.h"
#define MAX_AUDIO_FRAME_SIZE  1024  // samples per channel requested from the resampler
class VideoAudioDecode video_audio_decode;          // global decode/control state shared with the UI thread
class AudioOuputConfiguration audio_output_config;  // global audio device configuration (type declared in config.h)

//Thread entry point: initialise the audio output device, then run the
//decode/playback loop until it finishes or is stopped.
void Thread_FFMPEG_LaLiu::run()
{
    Audio_Out_Init();
    LogSend("开始拉流.\n");
    ffmpeg_rtmp_client();
}

//FFmpeg interrupt callback: returning 1 aborts the blocking I/O operation
//(used to stop the player), returning 0 lets it continue.
static int interrupt_cb(void *ctx)
{
    return (video_audio_decode.run_flag == 0) ? 1 : 0;
}

//Open the media in video_audio_decode.rtmp_url, decode audio and video in a
//single loop, play the audio through audio_out_streamIn and emit each video
//frame (as QImage) to the UI thread. Runs until run_flag becomes 0 or EOF.
//All errors are reported through the LogSend signal; the function always
//cleans up through the ERROR path (the previous version leaked format_ctx,
//PCM_pFrame, buffer and convert_ctx on several early returns).
int Thread_FFMPEG_LaLiu::ffmpeg_rtmp_client()
{
    bool seek_flag = false;           //set after av_seek_frame until the audio clock catches up
    quint64 audio_clock = 0;          //audio clock in whole seconds (derived from pkt.pts)
    AVStream *audio_stream = nullptr; //audio stream (its time base drives the clock)
    quint64 tmp_audio_clock = 0;      //last audio clock value sent to the UI
    int video_width = 0;
    int video_height = 0;
    AVCodec *video_pCodec = nullptr;
    AVCodec *audio_pCodec = nullptr;
    //audio/video stream indexes inside the container (-1 = absent)
    int video_stream_index = -1;
    int audio_stream_index = -1;

    AVFrame *PCM_pFrame = nullptr;       //decoded PCM audio
    AVFrame *RGB24_pFrame = nullptr;     //video frame converted to RGB24
    AVFrame *SRC_VIDEO_pFrame = nullptr; //decoded video frame (source pixel format)
    uint8_t *out_buffer_rgb = nullptr;   //pixel buffer backing RGB24_pFrame
    int numBytes = 0;
    struct SwsContext *img_convert_ctx = nullptr; //video pixel-format converter
    AVPacket pkt;
    int re;
    bool send_flag = true; //log the source PCM parameters only once

    //audio resampler output parameters
    uint64_t out_channel_layout = AV_CH_LAYOUT_MONO;    //mono output
    int out_nb_samples = MAX_AUDIO_FRAME_SIZE;          //samples per channel
    enum AVSampleFormat sample_fmt = AV_SAMPLE_FMT_S16; //signed 16-bit
    int out_sample_rate = 44100;                        //matches Audio_Out_Init()
    int out_channels = 0;

    int buffer_size = 0;
    uint8_t *buffer = nullptr; //resampled PCM ready for the sound card
    int64_t in_channel_layout = 0;
    struct SwrContext *convert_ctx = nullptr;

    // Allocate the demuxer context and register the abort callback so that
    // blocking I/O stops when run_flag is cleared.
    AVFormatContext *format_ctx = avformat_alloc_context();
    format_ctx->interrupt_callback.callback = interrupt_cb;

    //open the input and read the header (codecs are not opened yet)
    const char* url = video_audio_decode.rtmp_url;
    LogSend(tr("播放的视频文件: %1\n").arg(url));
    int ret = avformat_open_input(&format_ctx, url, nullptr, nullptr);
    if(ret != 0)
    {
        LogSend(tr("无法打开视频文件: %1, return value: %2 \n").arg(url).arg(ret));
        goto ERROR;
    }

    //probe the streams
    ret = avformat_find_stream_info(format_ctx, nullptr);
    if(ret < 0)
    {
        LogSend(tr("无法获取流信息: %1\n").arg(ret));
        goto ERROR; //was "return -1", which leaked format_ctx
    }

    LogSend(tr("视频中流的数量: %1\n").arg(format_ctx->nb_streams));
    for(unsigned int i = 0; i < format_ctx->nb_streams; ++i)
    {
        const AVStream* stream = format_ctx->streams[i];
        LogSend(tr("编码数据的类型: %1\n").arg(stream->codecpar->codec_id));
        if(stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            video_pCodec=avcodec_find_decoder(stream->codecpar->codec_id);
            //open the video decoder (stream->codec is deprecated in FFmpeg 4.x
            //but kept here to match the rest of this code base)
            if(avcodec_open2(stream->codec,video_pCodec,nullptr)!=0)
            {
                  LogSend(tr("H264解码器打开失败.\n"));
                  goto ERROR; //was "return 0", which leaked format_ctx
            }
            video_stream_index = i;
            video_width=stream->codecpar->width;
            video_height=stream->codecpar->height;

            LogSend(tr("视频帧的尺寸(以像素为单位): (宽X高)%1x%2 像素格式: %3\n").arg(
                stream->codecpar->width).arg(stream->codecpar->height).arg(stream->codecpar->format));
        }
        else if(stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            audio_stream=format_ctx->streams[i];
            audio_stream_index = i;
            audio_pCodec=avcodec_find_decoder(stream->codecpar->codec_id);
            qDebug()<<"codec_id:"<<stream->codecpar->codec_id<<"AV_CODEC_ID_AAC:"<<AV_CODEC_ID_AAC;

            //open the audio decoder
            if(avcodec_open2(stream->codec,audio_pCodec, nullptr)!=0)
            {
                  LogSend(tr("音频解码器打开失败.\n"));
                  goto ERROR; //was "return 0", which leaked format_ctx
            }
        }
    }

    if (video_stream_index == -1)
    {
         LogSend("没有检测到视频流.\n");
         goto ERROR; //was "return -1", which leaked format_ctx
    }

    if (audio_stream_index == -1)
    {
        //audio is optional — continue with video only
        LogSend("没有检测到音频流.\n");
    }

    //frame that will hold decoded PCM data
    PCM_pFrame = av_frame_alloc();

    //BUG FIX: set up the resampler only when an audio stream exists; the old
    //code dereferenced format_ctx->streams[-1] here for audio-less files.
    if(audio_stream_index >= 0)
    {
        //number of output channels implied by the layout
        out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
        //bytes per resampled audio frame (what we write to the device)
        buffer_size = av_samples_get_buffer_size(nullptr, out_channels, out_nb_samples, sample_fmt, 1);
        //output buffer for the resampler (must be av_malloc'd)
        buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);

        in_channel_layout = av_get_default_channel_layout(format_ctx->streams[audio_stream_index]->codec->channels);
        //configure the resampler: source stream format -> mono S16 @ 44100 Hz
        convert_ctx = swr_alloc_set_opts(nullptr, out_channel_layout, sample_fmt, out_sample_rate, \
               in_channel_layout, format_ctx->streams[audio_stream_index]->codec->sample_fmt, format_ctx->streams[audio_stream_index]->codec->sample_rate, 0, nullptr);

        LogSend(tr("转码_nb_samples=%1\n").arg(out_nb_samples));
        LogSend(tr("转码_音频数据声道=%1\n").arg(out_channels));
        LogSend(tr("转码_音频数据采样率=%1\n").arg(out_sample_rate));
        LogSend(tr("转码_channel_layout=%1\n").arg(out_channel_layout));

        swr_init(convert_ctx);
    }

    /* video converter: decoded pixel format -> RGB24 for QImage */
    SRC_VIDEO_pFrame = av_frame_alloc();
    RGB24_pFrame = av_frame_alloc();
    img_convert_ctx = sws_getContext(video_width, video_height,
            format_ctx->streams[video_stream_index]->codec->pix_fmt,video_width, video_height,
            AV_PIX_FMT_RGB24, SWS_BICUBIC, nullptr, nullptr, nullptr);

    numBytes = avpicture_get_size(AV_PIX_FMT_RGB24,video_width,video_height);
    out_buffer_rgb = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
    avpicture_fill((AVPicture *) RGB24_pFrame, out_buffer_rgb, AV_PIX_FMT_RGB24,
            video_width, video_height);

    //report the total duration (microseconds) to the UI
    emit sig_GetSumTime(format_ctx->duration);

    while(video_audio_decode.run_flag)
    {
        if(video_audio_decode.run_flag==2)
        {
            msleep(100); //paused — idle and re-check
            continue;
        }

        //seek requested by the UI thread?
        if(video_audio_decode.seek_flag)
        {
            video_audio_decode.seek_flag=0;
            seek_flag=true;
            int64_t seek_target = video_audio_decode.seek_pos;
            AVRational aVRational = {1, AV_TIME_BASE};
            if(video_stream_index >= 0)
            {
                //convert microseconds to the video stream's time base
                seek_target = av_rescale_q(seek_target, aVRational,
                        format_ctx->streams[video_stream_index]->time_base);
            }
             qDebug()<<"跳转成功:"<<seek_target<<",状态:"<<av_seek_frame(format_ctx, video_stream_index, seek_target, AVSEEK_FLAG_BACKWARD);

             //drop buffered frames belonging to the pre-seek position
             avcodec_flush_buffers(format_ctx->streams[video_stream_index]->codec);
        }

        //read the next packet
        ret=av_read_frame(format_ctx, &pkt);
        if(ret < 0)
        {
            qDebug()<<"数据读取完毕.";
            break;
        }

        //audio packet
        if(pkt.stream_index == audio_stream_index)
        {
             re = avcodec_send_packet(format_ctx->streams[audio_stream_index]->codec,&pkt);
             if (re != 0)
             {
                 av_packet_unref(&pkt); //release the packet on failure
                 continue;
             }

             re = avcodec_receive_frame(format_ctx->streams[audio_stream_index]->codec, PCM_pFrame);
             if (re != 0)
             {
                 av_packet_unref(&pkt); //release the packet on failure
                 continue;
             }

            //resample this frame to mono S16 @ 44100 Hz for QAudioOutput
            swr_convert(convert_ctx, &buffer, MAX_AUDIO_FRAME_SIZE, (const uint8_t **)PCM_pFrame->data, PCM_pFrame->nb_samples);
            //log the source PCM parameters once
            if(send_flag)
            {
                send_flag=false;
                LogSend(tr("原始PCM数据_nb_samples=%1\n").arg(PCM_pFrame->nb_samples));
                LogSend(tr("原始PCM数据_音频数据声道=%1\n").arg(PCM_pFrame->channels));
                LogSend(tr("原始PCM数据_音频数据采样率=%1\n").arg(PCM_pFrame->sample_rate));
                LogSend(tr("原始PCM数据_channel_layout=%1\n").arg(PCM_pFrame->channel_layout));
            }

            //audio clock in whole seconds
            if (pkt.pts != AV_NOPTS_VALUE)
            {
                audio_clock = av_q2d(audio_stream->time_base) * pkt.pts;
            }

            //after a seek: drop audio until the clock reaches the seek target
            if(seek_flag)
            {
                if(audio_clock*1000000<video_audio_decode.seek_pos)
                {
                    av_packet_unref(&pkt);
                    continue;
                }
                else
                {
                    seek_flag=false;
                }
            }

            //notify the UI once per second
            if(tmp_audio_clock!=audio_clock)
            {
                tmp_audio_clock=audio_clock;
                emit sig_getCurrentTime(audio_clock);
            }

            if(!audio_output_config.audio.isNull())
            {
                //blocking write of the resampled PCM to the audio device
                while(audio_out_streamIn->write((const char *)buffer,buffer_size)!=buffer_size)
                {

                }
            }
         }

        //video packet
        if(pkt.stream_index == video_stream_index)
        {
             re = avcodec_send_packet(format_ctx->streams[video_stream_index]->codec,&pkt);
             if (re != 0)
             {
                 av_packet_unref(&pkt); //release the packet on failure
                 continue;
             }
             re = avcodec_receive_frame(format_ctx->streams[video_stream_index]->codec, SRC_VIDEO_pFrame);
             if (re != 0)
             {
                 av_packet_unref(&pkt); //release the packet on failure
                 continue;
             }
             //convert the decoded frame to RGB24
             sws_scale(img_convert_ctx,
                     (uint8_t const **) SRC_VIDEO_pFrame->data,
                     SRC_VIDEO_pFrame->linesize, 0, video_height, RGB24_pFrame->data,
                     RGB24_pFrame->linesize);

            //wrap the RGB buffer in a QImage and scale it to the display label
            QImage image(out_buffer_rgb,video_width,video_height,QImage::Format_RGB888);
            image=image.scaled(video_audio_decode.label_size,Qt::KeepAspectRatio, Qt::SmoothTransformation);

            emit VideoDataOutput(image); //hand the frame to the UI thread
        }
        av_packet_unref(&pkt);
    }

ERROR:
    //free decode/convert resources; av_frame_free also releases frame buffers
    //(av_free on an AVFrame, as before, leaked its internal data planes)
    if(SRC_VIDEO_pFrame) av_frame_free(&SRC_VIDEO_pFrame);
    if(RGB24_pFrame) av_frame_free(&RGB24_pFrame);
    if(PCM_pFrame) av_frame_free(&PCM_pFrame);
    if(out_buffer_rgb) av_free(out_buffer_rgb);
    if(buffer) av_free(buffer);
    if(convert_ctx) swr_free(&convert_ctx);
    if(img_convert_ctx) sws_freeContext(img_convert_ctx);
    //avformat_close_input frees the context and nulls the pointer, so the
    //old follow-up avformat_free_context call was redundant
    if(format_ctx) avformat_close_input(&format_ctx);

    LogSend("视频音频解码播放器的线程退出成功.\n");
    LogSend("play exit\n");
    return 0;
}


//Initialise the Qt audio output device: mono, 44.1 kHz, signed 16-bit PCM,
//little-endian — matching the resampler output in ffmpeg_rtmp_client().
void Thread_FFMPEG_LaLiu::Audio_Out_Init()
{
    QAudioFormat auido_out_format;
    //playback format
    auido_out_format.setSampleRate(44100); //sample rate, in Hz
    auido_out_format.setChannelCount(1);   //mono
    auido_out_format.setSampleSize(16);     /*sample size in bits; usually 8 or 16, some systems support larger sizes*/
    auido_out_format.setCodec("audio/pcm"); //raw PCM
    auido_out_format.setByteOrder(QAudioFormat::LittleEndian); //little-endian samples
    auido_out_format.setSampleType(QAudioFormat::SignedInt); //signed integer samples

    //no output device available — leave audio_out/audio_out_streamIn null
    if(audio_output_config.audio.isNull())return;
    QAudioDeviceInfo info(audio_output_config.audio);
    //re-initialisation: drop any previous device object
    if(audio_out)
    {
        delete audio_out;
        audio_out=nullptr;
    }
    audio_out = new QAudioOutput(info,auido_out_format);

    //start() returns the QIODevice the decode loop writes PCM into
    audio_out_streamIn=audio_out->start();
    LogSend("音频输出初始化成功.\n");
}

The following public account offers a full set of QT, C, C++, and microcontroller basic tutorials — you are welcome to follow it:

 

Guess you like

Origin blog.csdn.net/xiaolong1126626497/article/details/110621872