Android FFmpeg Development (3): Audio Rendering with OpenSL ES

In the previous article we used FFmpeg + ANativeWindow to decode and render the video stream, so the video frames already show up on a SurfaceView. If you have not read that article yet, start there: Android FFmpeg Development (2): Video Decoding and Rendering.

In this article we decode and render the audio stream, which gets us close to a complete video player. The technology choices are:

  • Use FFmpeg to decode the audio stream
  • Use OpenSL ES to play the decoded PCM data

一、Decoding the Audio Stream with FFmpeg

The audio pipeline is broadly the same as the video decoding pipeline, so see the previous article for the overall flow. Here we focus on the details that are specific to audio decoding.
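
For reference, the decode loop itself is the same send/receive pattern as the video path. Below is a minimal sketch, assuming the usual member names (mFormatContext, mCodecContext and mStreamIndex are placeholders for whatever the decoder class actually holds); error handling and end-of-stream draining are omitted:

AVPacket *packet = av_packet_alloc();
AVFrame *frame = av_frame_alloc();
while (av_read_frame(mFormatContext, packet) >= 0) {
    if (packet->stream_index == mStreamIndex) {
        // Feed the compressed packet to the decoder, then drain all decoded frames
        if (avcodec_send_packet(mCodecContext, packet) == 0) {
            while (avcodec_receive_frame(mCodecContext, frame) == 0) {
                onFrameAvailable(frame); // resample + hand off to the audio render (see below)
            }
        }
    }
    av_packet_unref(packet);
}
av_frame_free(&frame);
av_packet_free(&packet);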

First, format conversion. In the previous article we converted the video frames (YUV -> RGBA); likewise, decoded audio frames need to be resampled into the format we intend to play, as shown below:

AVCodecContext *codecContext = getCodecContext();
mSwrContext = swr_alloc();

// Source format comes from the decoder, target format is what we will feed to OpenSL ES
av_opt_set_int(mSwrContext, "in_channel_layout", codecContext->channel_layout, 0);
av_opt_set_int(mSwrContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);

av_opt_set_int(mSwrContext, "in_sample_rate", codecContext->sample_rate, 0);
av_opt_set_int(mSwrContext, "out_sample_rate", 44100, 0);

av_opt_set_int(mSwrContext, "in_sample_fmt", codecContext->sample_fmt, 0);
av_opt_set_int(mSwrContext, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);

swr_init(mSwrContext);

// Resample: number of output samples per frame after the rate conversion
mNbSample = av_rescale_rnd(ACC_NB_SAMPLES, AUDIO_DST_SAMPLE_RATE,
                           codecContext->sample_rate, AV_ROUND_UP);
// Size in bytes of one resampled frame (stereo, 16-bit)
mDstFrameDataSize = av_samples_get_buffer_size(NULL, AUDIO_DST_CHANNEL_COUNTS,
                                               mNbSample, DST_SAMPLE_FORMAT, 1);
// Allocate the buffer that holds one frame of PCM for OpenSL ES playback
mAudioOutBuffer = (uint8_t *) malloc(mDstFrameDataSize);

We pass the source audio format (channel layout, sample rate, sample format) and the target format to av_opt_set_int, then call swr_init to initialize the SwrContext. After that, calling swr_convert with the SwrContext produces audio in the target format, as shown below:

void AudioDecoder::onFrameAvailable(AVFrame *frame) {
    LOGD("AudioDecoder::onFrameAvailable frame=%p, frame->nb_samples=%d\n", frame, frame->nb_samples);
    if (mAudioRender) {
        // Resample the decoded frame; the converted samples are written into mAudioOutBuffer.
        // The third argument is the output capacity in samples per channel (mNbSample).
        int result = swr_convert(mSwrContext, &mAudioOutBuffer, mNbSample,
                                 (const uint8_t **) frame->data, frame->nb_samples);
        if (result > 0) {
            // Render the PCM with OpenSL ES
            mAudioRender->renderAudioFrame(mAudioOutBuffer, mDstFrameDataSize);
        }
    }
}

At this point, the PCM data to be rendered sits in the buffer pointed to by mAudioOutBuffer. Next, we hand this data to OpenSL ES and let OpenSL ES play the PCM.
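
How the PCM travels from the decoder thread to OpenSL ES is an implementation detail of the render class covered in the next section. A minimal sketch, assuming renderAudioFrame simply copies the buffer into a queue that the OpenSL ES callback drains later (the AudioFrame fields data/dataSize match the Enqueue call shown further down, but the queue, mutex and condition-variable members used here are illustrative assumptions, not necessarily the repository's exact code):

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <mutex>
#include <queue>

// Simple holder that owns a copy of one resampled PCM frame
struct AudioFrame {
    uint8_t *data;
    int dataSize;
    AudioFrame(uint8_t *src, int size) : dataSize(size) {
        data = (uint8_t *) malloc(size);
        memcpy(data, src, size);
    }
    ~AudioFrame() { free(data); }
};

// Producer side, called on the decoder thread from onFrameAvailable
void OpenSLRender::renderAudioFrame(uint8_t *pData, int dataSize) {
    std::unique_lock<std::mutex> lock(mMutex);               // mMutex: assumed std::mutex member
    mAudioFrameQueue.push(new AudioFrame(pData, dataSize));  // assumed std::queue<AudioFrame *> member
    mCondition.notify_all();                                 // wake the callback if it is waiting for data
}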

二、Rendering Audio with OpenSL ES

The Android NDK ships a C interface to OpenSL ES that provides very capable audio processing. See the official Android documentation: OpenSL ES.

Before using the OpenSL ES API, include the OpenSL ES headers:

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>

Then edit CMakeLists.txt so that the OpenSL ES library is linked in at the link stage:

target_link_libraries( # Specifies the target library.
        hello-ffmpeg

        # Links the target library to the log library
        # included in the NDK.
        ${log-lib}
        android
        OpenSLES
        ffmpeg)

With that done, we can use the OpenSL ES API. The steps are as follows:

1) Create the engine object and obtain the engine interface

int OpenSLRender::createEngine() {
    SLresult result = SL_RESULT_SUCCESS;
    do {
        result = slCreateEngine(&mEngineObj, 0, nullptr,
                                0, nullptr, nullptr);
        if (result != SL_RESULT_SUCCESS) {
            LOGD("OpenSLRender::createEngine slCreateEngine fail. result=%d\n", result);
            break;
        }

        result = (*mEngineObj)->Realize(mEngineObj, SL_BOOLEAN_FALSE);
        if (result != SL_RESULT_SUCCESS) {
            LOGD("OpenSLRender::createEngine Realize fail. result=%d\n", result);
            break;
        }

        result = (*mEngineObj)->GetInterface(mEngineObj, SL_IID_ENGINE, &mEngineEngine);
        if (result != SL_RESULT_SUCCESS) {
            LOGD("OpenSLRender::createEngine GetInterface fail. result=%d\n", result);
            break;
        }

    } while (false);

    return result;
}

2) Create the output mix object

int OpenSLRender::createOutputMixer() {
    SLresult result = SL_RESULT_SUCCESS;
    do {
        const SLInterfaceID mids[1] = {SL_IID_ENVIRONMENTALREVERB};
        const SLboolean mreg[1] = {SL_BOOLEAN_FALSE};

        result = (*mEngineEngine)->CreateOutputMix(mEngineEngine, &mOutputMixObj, 1, mids, mreg);
        if (result != SL_RESULT_SUCCESS) {
            LOGD("OpenSLRender::createOutputMixer CreateOutputMix fail. result=%d\n", result);
            break;
        }

        result = (*mOutputMixObj)->Realize(mOutputMixObj, SL_BOOLEAN_FALSE);
        if (result != SL_RESULT_SUCCESS) {
            LOGD("OpenSLRender::createOutputMixer Realize fail. result=%d\n", result);
            break;
        }
    } while (false);

    return result;
}

3) Create the audio player object and register a callback on its buffer queue

int OpenSLRender::createAudioPlayer() {
    // Buffer-queue locator with 2 buffers; the PCM format below must match the
    // swr_convert output (stereo, 44100 Hz, 16-bit little-endian)
    SLDataLocator_AndroidSimpleBufferQueue android_queue = {
            SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};

    SLDataFormat_PCM pcm = {
            SL_DATAFORMAT_PCM,                              // format type
            (SLuint32) 2,                                   // channel count
            SL_SAMPLINGRATE_44_1,                           // 44100 Hz
            SL_PCMSAMPLEFORMAT_FIXED_16,                    // bits per sample
            SL_PCMSAMPLEFORMAT_FIXED_16,                    // container size
            SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, // channel mask
            SL_BYTEORDER_LITTLEENDIAN                       // little-endian byte order
    };
    SLDataSource slDataSource = {&android_queue, &pcm};

    SLDataLocator_OutputMix outputMix = {SL_DATALOCATOR_OUTPUTMIX, mOutputMixObj};
    SLDataSink slDataSink = {&outputMix, nullptr};

    const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_EFFECTSEND, SL_IID_VOLUME};
    const SLboolean req[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};

    SLresult result;

    do {
        result = (*mEngineEngine)->CreateAudioPlayer(mEngineEngine, &mAudioPlayerObj, &slDataSource,
                                                     &slDataSink, 3, ids, req);
        if (result != SL_RESULT_SUCCESS) {
            LOGD("OpenSLRender::createAudioPlayer CreateAudioPlayer fail. result=%d\n", result);
            break;
        }

        result = (*mAudioPlayerObj)->Realize(mAudioPlayerObj, SL_BOOLEAN_FALSE);
        if (result != SL_RESULT_SUCCESS) {
            LOGD("OpenSLRender::createAudioPlayer Realize mAudioPlayerObj fail. result=%d\n",
                 result);
            break;
        }

        result = (*mAudioPlayerObj)->GetInterface(mAudioPlayerObj, SL_IID_PLAY, &mAudioPlayerPlay);
        if (result != SL_RESULT_SUCCESS) {
            LOGD("OpenSLRender::createAudioPlayer GetInterface SL_IID_PLAY fail. result=%d\n",
                 result);
            break;
        }

        result = (*mAudioPlayerObj)->GetInterface(mAudioPlayerObj, SL_IID_BUFFERQUEUE,
                                                  &mBufferQueue);
        if (result != SL_RESULT_SUCCESS) {
            LOGD("OpenSLRender::createAudioPlayer GetInterface SL_IID_BUFFERQUEUE fail. result=%d\n",
                 result);
            break;
        }

        // The callback fires whenever OpenSL ES has finished a buffer and wants more data
        result = (*mBufferQueue)->RegisterCallback(mBufferQueue, audioPlayerCallback, this);
        if (result != SL_RESULT_SUCCESS) {
            LOGD("OpenSLRender::createAudioPlayer RegisterCallback fail. result=%d\n", result);
            break;
        }

        result = (*mAudioPlayerObj)->GetInterface(mAudioPlayerObj, SL_IID_VOLUME,
                                                  &mAudioPlayerVolume);
        if (result != SL_RESULT_SUCCESS) {
            LOGD("OpenSLRender::createAudioPlayer GetInterface SL_IID_VOLUME fail. result=%d\n",
                 result);
            break;
        }

    } while (false);

    return result;
}

One key step above is registering a callback on the SLAndroidSimpleBufferQueueItf. OpenSL ES invokes this callback whenever it needs more data to play, and the callback is where we fill in the PCM data.
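
The registered callback is a plain C function pointer, so it is typically a static member function that recovers the render instance from the context pointer passed as the third argument of RegisterCallback. Below is a sketch that continues the queue idea from the earlier renderAudioFrame sketch (audioPlayerCallback matches the name registered above; handleAudioFrameQueue and mPrevFrame are illustrative assumptions); the two core calls it relies on are shown in steps 4) and 5) below:

// Static member function: OpenSL ES calls this on its own thread every time a
// previously enqueued buffer has finished playing and more data is wanted.
void OpenSLRender::audioPlayerCallback(SLAndroidSimpleBufferQueueItf bufferQueue, void *context) {
    OpenSLRender *render = static_cast<OpenSLRender *>(context);
    render->handleAudioFrameQueue();
}

void OpenSLRender::handleAudioFrameQueue() {
    std::unique_lock<std::mutex> lock(mMutex);
    while (mAudioFrameQueue.empty()) {
        mCondition.wait(lock); // simplification: block until the decoder pushes a frame
    }
    AudioFrame *audioFrame = mAudioFrameQueue.front();
    mAudioFrameQueue.pop();
    lock.unlock();

    // The buffer enqueued last time has finished playing by the time this callback
    // runs, so its frame can be released now (mPrevFrame starts out as nullptr).
    delete mPrevFrame;
    mPrevFrame = audioFrame;

    // Hand the PCM to OpenSL ES; when this buffer finishes, the callback fires again.
    (*mBufferQueue)->Enqueue(mBufferQueue, audioFrame->data, (SLuint32) audioFrame->dataSize);
}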

4) Fill in the PCM data by calling Enqueue on the SLAndroidSimpleBufferQueueItf interface:

// SLAndroidSimpleBufferQueueItf
(*mBufferQueue)->Enqueue(mBufferQueue, audioFrame->data, (SLuint32)audioFrame->dataSize);

5) Start playback

// SLPlayItf
(*mAudioPlayerPlay)->SetPlayState(mAudioPlayerPlay, SL_PLAYSTATE_PLAYING);
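
Setting SL_PLAYSTATE_PLAYING by itself does not produce sound: with a buffer queue, the callback only fires after a buffer finishes playing, so the first buffer has to be enqueued manually to start the chain. A sketch of a start routine under that assumption (startRender is an illustrative name; the three create functions are the ones shown above):

// Illustrative start sequence: engine -> output mix -> player -> play state -> prime first buffer
int OpenSLRender::startRender() {
    if (createEngine() != SL_RESULT_SUCCESS) return -1;
    if (createOutputMixer() != SL_RESULT_SUCCESS) return -1;
    if (createAudioPlayer() != SL_RESULT_SUCCESS) return -1;

    (*mAudioPlayerPlay)->SetPlayState(mAudioPlayerPlay, SL_PLAYSTATE_PLAYING);

    // Prime the pipeline: invoke the callback once so the first frame gets enqueued;
    // every later Enqueue then happens inside audioPlayerCallback.
    audioPlayerCallback(mBufferQueue, this);
    return 0;
}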

6) Finally, release the resources

void OpenSLRender::unInit() {
    LOGD("OpenSLRender::unInit");

    if (mAudioPlayerPlay) {
        (*mAudioPlayerPlay)->SetPlayState(mAudioPlayerPlay, SL_PLAYSTATE_STOPPED);
        mAudioPlayerPlay = nullptr;
    }

    if (mAudioPlayerObj) {
        (*mAudioPlayerObj)->Destroy(mAudioPlayerObj);
        mAudioPlayerObj = nullptr;
        mBufferQueue = nullptr;
    }

    if (mOutputMixObj) {
        (*mOutputMixObj)->Destroy(mOutputMixObj);
        mOutputMixObj = nullptr;
    }

    if (mEngineObj) {
        (*mEngineObj)->Destroy(mEngineObj);
        mEngineObj = nullptr;
        mEngineEngine = nullptr;
    }
}

三、Summary

In this article we used FFmpeg + OpenSL ES to decode and play the audio stream. With that, a simple video player is complete.

Source code

git clone [email protected]:lorienzhang/HelloFFmpeg.git
# check out the v3 tag for this article's code
git checkout v3

Reposted from blog.csdn.net/H_Zhang/article/details/124169956