(五) AudioTrack播放pcm音频

版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/huanghuangjin/article/details/82807341

Java (AudioTrackActivity):

/**
 * Demo activity: plays raw PCM audio through {@link AudioTrack}, either directly
 * from Java or from native (JNI) code that decodes an mp4 with FFmpeg.
 */
public class AudioTrackActivity extends BaseActivity {

    /** Launches this activity from the given one. */
    public static void startAudioTrackActivity(Activity activity) {
        activity.startActivity(new Intent(activity, AudioTrackActivity.class));
    }

    @BindView(R.id.tv_audiotrack_status) TextView mStatusTv;
    private AudioTrack mAudioTrack;
    // Written on the UI thread (stop button / onDestroy) and read from the playback
    // worker thread — volatile is required for cross-thread visibility of the flag.
    private volatile boolean isLocalPlaying = true;

    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_audiotrck);
        mUnbinder = ButterKnife.bind(this);

        mAudioTrack = createAudioTrack(44100, 2);
    }

    /*
        Main AudioTrack methods:

            play()    - start playback.
            stop()    - in STREAM mode, playback stops only after the already
                        written buffer has finished; for an immediate stop call
                        pause() and then flush(), which discards unplayed data.
            pause()   - pause playback; call play() again to resume.
            flush()   - STREAM mode only: flushes the queued audio data; written
                        but uncommitted data is discarded. It does not guarantee
                        that all buffer space becomes available for later writes.
            release() - frees the native AudioTrack resources.
            getPlayState() - returns the current playback state.
     */
    /**
     * Creates an AudioTrack used for PCM playback. Also invoked from JNI code.
     *
     * @param sampleRateInHz sample rate, e.g. 44100
     * @param nb_channels    channel count: 1 = mono, anything else = stereo
     * @return a STREAM-mode AudioTrack; the caller must call play() and write()
     */
    public AudioTrack createAudioTrack(int sampleRateInHz, int nb_channels){
        // Fixed 16-bit PCM sample format.
        int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        LogUtils.d("mydebug---", "createAudioTrack nb_channels : "+nb_channels);
        // Channel layout: mono only for exactly one channel, stereo otherwise
        // (the original if/else-if/else collapsed to the same stereo default).
        int channelConfig = (nb_channels == 1)
                ? android.media.AudioFormat.CHANNEL_OUT_MONO
                : android.media.AudioFormat.CHANNEL_OUT_STEREO;

        int bufferSizeInBytes = AudioTrack.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);

        return new AudioTrack(
                AudioManager.STREAM_MUSIC,
                sampleRateInHz, channelConfig,
                audioFormat,
                bufferSizeInBytes, AudioTrack.MODE_STREAM);
        // Usage: audioTrack.play(); then audioTrack.write(audioData, offsetInBytes, sizeInBytes);
    }

    /** Called from JNI when decoding is finished; updates the status label on the UI thread. */
    public void setStatus() {
        LogUtils.d("mydebug---", "setStatus thread : "+Thread.currentThread()); // e.g. Thread[Thread-2,5,main]
        runOnUiThread(new Runnable() {
            @Override
            public void run() {
                mStatusTv.setText("完成!");
            }
        });
    }

    @OnClick({R.id.btn_audiotrack_localplay, R.id.btn_audiotrack_jniplay, R.id.btn_audiotrack_play, R.id.btn_audiotrack_stop})
    void click(View view) {
        final String sdcard = Environment.getExternalStorageDirectory().getAbsolutePath();
        switch (view.getId()) {
            case R.id.btn_audiotrack_localplay: // play a local pcm file from Java
                isLocalPlaying = true;
                new Thread(){
                    @Override
                    public void run() {
                        mAudioTrack.play();
                        File file = new File(sdcard + "/test.pcm");
                        FileInputStream fis = null;
                        try {
                            fis = new FileInputStream(file);
                            // 8192 matches the known per-frame size of the test file; any
                            // size works as long as no artificial per-chunk sleep is added.
                            byte[] buf = new byte[8192];
                            int len;
                            while (isLocalPlaying && (len = fis.read(buf)) != -1) {
                                mAudioTrack.write(buf, 0, len);
                            }
                        } catch (Exception e) {
                            e.printStackTrace();
                        } finally {
                            if (fis != null) {
                                try {
                                    fis.close();
                                } catch (IOException e) {
                                    e.printStackTrace();
                                }
                            }
                        }
                    }
                }.start();
                break;
            case R.id.btn_audiotrack_jniplay: // play the local pcm from JNI, no decoding
                new Thread(){
                    @Override
                    public void run() {
                        localplay(sdcard + "/test.pcm");
                    }
                }.start();
                break;
            case R.id.btn_audiotrack_play: // decode an mp4 and play it through AudioTrack
                mStatusTv.setVisibility(View.VISIBLE);
                mStatusTv.setText("AudioTrack播放PCM,解码中..");
                new Thread(){
                    @Override
                    public void run() {
                        play(sdcard + "/1080.mp4", sdcard + "/audiotrack.pcm");
                    }
                }.start();
                break;
            case R.id.btn_audiotrack_stop:
                isLocalPlaying = false;
                mAudioTrack.pause(); // pause + flush = immediate stop in STREAM mode
                mAudioTrack.flush();
                stop();
                break;
        }
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
        // BUGFIX: signal the worker threads to stop BEFORE releasing the track;
        // the old order (release first) let native/Java loops keep writing into
        // an already-released AudioTrack.
        isLocalPlaying = false;
        stop();
        mAudioTrack.release();
    }

    /** Native: decode the mp4 at {@code path}, play it and dump the PCM to {@code outPcm}. */
    private native void play(String path, String outPcm);
    /** Native: stream the raw pcm file at {@code path} into the AudioTrack. */
    private native void localplay(String path);
    /** Native: clear the run flag so native playback loops exit. */
    private native void stop();

}

C++ (JNI implementation):

#include <unistd.h>
#include "hjcommon.hpp"
extern "C" {
    #include "libavutil/imgutils.h"
}

// Run flag for the decode/playback loops; cleared by Java_..._stop() from another
// thread. NOTE(review): shared across threads without volatile/atomic — consider
// std::atomic<bool> to guarantee visibility; TODO confirm.
static bool isRunning = true;
static jobject obj_audiotrack = 0; // global reference to the Java AudioTrack instance
static jmethodID mid_write; // AudioTrack.write([BII)I — jmethodIDs can be used across threads without a global ref
static jmethodID mid_release; // AudioTrack.release()V
// Creates an AudioTrack by calling back into Java (createAudioTrack), starts it,
// and caches a global reference to it plus the write/release method IDs.
static void initAudioTrack(JNIEnv *env, jobject instance, int sample_rate, int nb_channels)
{
    // BUGFIX: the original test was inverted (`if (!obj_audiotrack)`), which
    // called DeleteGlobalRef on NULL and leaked every previously held reference.
    // NOTE(review): callers must also null obj_audiotrack after deleting it
    // themselves, otherwise this would double-delete a stale reference.
    if (obj_audiotrack) {
        env->DeleteGlobalRef(obj_audiotrack);
        obj_audiotrack = 0;
    }

    jclass clz_mp6 = env->GetObjectClass(instance);
    jmethodID mid_create = env->GetMethodID(clz_mp6, "createAudioTrack", "(II)Landroid/media/AudioTrack;"); // Java factory for the AudioTrack
    LOGD("initAudioTrack sample_rate=%d, nb_channels=%d", sample_rate, nb_channels);
    jobject obj_aud = env->CallObjectMethod(instance, mid_create, sample_rate, nb_channels); // e.g. 44100 Hz, 2 channels
    jclass clz_aud = env->GetObjectClass(obj_aud);

    jmethodID mid_play = env->GetMethodID(clz_aud, "play", "()V");
    env->CallVoidMethod(obj_aud, mid_play); // start playback immediately
    mid_write = env->GetMethodID(clz_aud, "write", "([BII)I");
    mid_release = env->GetMethodID(clz_aud, "release", "()V");

    obj_audiotrack = env->NewGlobalRef(obj_aud); // keep the track alive across JNI calls
}
// Pushes out_buffer_size bytes of PCM into the Java AudioTrack (plays the audio).
static void audiotrack_write(JNIEnv *env, uint8_t *out_buffer, int out_buffer_size)
{
    jbyteArray audio_sample_array = env->NewByteArray(out_buffer_size);
    // SetByteArrayRegion copies straight into the Java array in one step — no
    // need to pin elements with Get/ReleaseByteArrayElements plus a manual memcpy.
    env->SetByteArrayRegion(audio_sample_array, 0, out_buffer_size, (const jbyte *) out_buffer);

    // AudioTrack.write(byte[], 0, size)
    env->CallIntMethod(obj_audiotrack, mid_write, audio_sample_array, 0, out_buffer_size);
    // Drop the local reference so long-running loops don't exhaust the local-ref table.
    env->DeleteLocalRef(audio_sample_array);
}

// JNI entry: demux the mp4 at path_, decode its audio stream, resample every
// frame to packed 16-bit stereo PCM, play it through the Java AudioTrack and
// also dump the raw PCM to outPcm_.
JNIEXPORT void JNICALL Java_hankin_hjmedia_ff_some_AudioTrackActivity_play(JNIEnv *env, jobject instance, jstring path_, jstring outPcm_)
{
    isRunning = true;
    char path[128];
    hjcpyJstr2char(env, path_, path);
    char outPcm[128];
    hjcpyJstr2char(env, outPcm_, outPcm);

    AVFormatContext * avFormatContext = 0;
    int ret = avformat_open_input(&avFormatContext, path, 0, 0);
    if (ret!=0)
    {
        LOGE("avformat_open_input error.");
        return;
    }
    ret = avformat_find_stream_info(avFormatContext, 0);
    if (ret!=0)
    {
        LOGE("avformat_find_stream_info error.");
        avformat_close_input(&avFormatContext); // BUGFIX: this path used to leak the format context
        return;
    }
    int audioStream = av_find_best_stream(avFormatContext, AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0);
    AVStream *avs = avFormatContext->streams[audioStream];
    // e.g. sample_rate=44100, channels=2, sample_format=8
    LOGD("sample_rate=%d, channels=%d, sample_format=%d", avs->codecpar->sample_rate, avs->codecpar->channels, avs->codecpar->format);

    AVCodecContext * audioCodecContext = avcodec_alloc_context3(0);
    int gaRet = hjgetAVDecoder6_1(audioCodecContext, avs->codecpar, false);
    if (gaRet!=0)
    {
        // BUGFIX: free everything acquired so far instead of leaking it.
        avcodec_free_context(&audioCodecContext);
        avformat_close_input(&avFormatContext);
        return;
    }
    LOGD("audioCodecContext->sample_rate=%d", audioCodecContext->sample_rate); // e.g. 44100

    // Output is forced to 2 channels at the source sample rate.
    initAudioTrack(env, instance, audioCodecContext->sample_rate, av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO));

    int frameCount = 0;
    AVPacket * packet = av_packet_alloc();
    AVFrame * frame = av_frame_alloc();
    FILE * fp = fopen(outPcm, "wb"); // may be NULL — guarded at every use below

    // Decoded audio cannot be fed to AudioTrack directly; it must be resampled
    // (planar float etc. -> packed S16 stereo) first.
    SwrContext * swrContext = swr_alloc();
    // Output: stereo layout, S16 samples, same rate as the input stream.
    swr_alloc_set_opts(swrContext, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, audioCodecContext->sample_rate,
                       audioCodecContext->channel_layout, audioCodecContext->sample_fmt, audioCodecContext->sample_rate, 0, 0);
    int swrRet = swr_init(swrContext); // 0 on success
    if (swrRet!=0)
    {
        LOGE("swr_init is failed : %s", av_err2str(swrRet));
        // BUGFIX: this path used to leak every resource acquired above.
        swr_free(&swrContext);
        if (fp) fclose(fp);
        av_packet_free(&packet);
        av_frame_free(&frame);
        avcodec_close(audioCodecContext);
        avcodec_free_context(&audioCodecContext);
        avformat_close_input(&avFormatContext);
        return;
    }
    unsigned char * pcm = 0; // resample output buffer, sized lazily from the first frame
    int out_buffer_size = 0; // allocated size of `pcm`
    bool needAlloc = true;   // true until `pcm` has been allocated

    while (isRunning)
    {
        ret = av_read_frame(avFormatContext, packet);
        if (ret!=0)
        {
            LOGI("decode end : %s", av_err2str(ret));
            break;
        }
        if (packet->stream_index==audioStream)
        {
            // Retry send_packet briefly in case the decoder is momentarily full.
            int num = 0;
            while (true)
            {
                num++;
                ret = avcodec_send_packet(audioCodecContext, packet);
                if (ret!=0)
                {
                    LOGW("avcodec_send_packet audio error.");
                    usleep(1000);
                }
                if (ret==0 || num>=5) break;
            }

            // Drain every frame this packet produced.
            while (true)
            {
                ret = avcodec_receive_frame(audioCodecContext, frame);
                if (ret!=0) break;
                frameCount++;

                uint8_t * out[2] = {0}; // plane pointers; packed S16 stereo only uses out[0]
                out[0] = pcm;
                if (needAlloc)
                {
                    needAlloc = false;
                    // Sized from the SOURCE format (e.g. 4-byte float), which over-allocates
                    // for the 2-byte S16 output — safe. NOTE(review): assumes later frames
                    // never carry more samples than the first one; TODO confirm.
                    int per_sample = av_get_bytes_per_sample((AVSampleFormat)frame->format);
                    out_buffer_size = per_sample * frame->nb_samples * 2; // bytes/sample * samples * 2 channels
                    LOGD("per_sample=%d, out_buffer_size=%d", per_sample, out_buffer_size); // e.g. per_sample=4, out_buffer_size=8192
                    pcm = new unsigned char[out_buffer_size];
                    out[0] = pcm;
                }
                int len = swr_convert(swrContext, out, frame->nb_samples, (const uint8_t **) frame->data, frame->nb_samples);
                // e.g. format=8, sample_rate=44100, channels=2, nb_samples=1024, len=1024
                LOGD("format=%d, sample_rate=%d, channels=%d, nb_samples=%d, len=%d", frame->format, frame->sample_rate, frame->channels, frame->nb_samples, len);

                // BUGFIX: only `len * 2 channels * 2 bytes` of `pcm` hold valid S16
                // data; the old code wrote out_buffer_size (sized from the 4-byte
                // source format) and so played/saved a garbage tail every frame.
                int validBytes = len * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);

                // Play the resampled PCM through the Java AudioTrack.
                audiotrack_write(env, out[0], validBytes);

                if (fp) fwrite(out[0], 1, validBytes, fp); // save to the output file
            }
        }

        av_packet_unref(packet);
    }

    delete [] pcm;
    swr_free(&swrContext);

    av_packet_free(&packet);
    av_frame_free(&frame);
    avcodec_close(audioCodecContext);
    avcodec_free_context(&audioCodecContext);
    avformat_close_input(&avFormatContext);
    if (fp) fclose(fp); // BUGFIX: fclose(NULL) is undefined behavior

    env->CallVoidMethod(obj_audiotrack, mid_release); // release the Java AudioTrack
    env->DeleteGlobalRef(obj_audiotrack);
    obj_audiotrack = 0; // clear the cached pointer so a later init cannot double-delete

    // Notify the activity that decoding has finished.
    jclass  clz = env->GetObjectClass(instance);
    jmethodID mid = env->GetMethodID(clz, "setStatus", "()V");
    env->CallVoidMethod(instance, mid);
}

// JNI entry: stream the raw pcm file at path_ straight into an AudioTrack
// (no decoding involved).
JNIEXPORT void JNICALL Java_hankin_hjmedia_ff_some_AudioTrackActivity_localplay(JNIEnv *env, jobject instance, jstring path_)
{
    isRunning = true;
    char path[128];
    hjcpyJstr2char(env, path_, path);

    initAudioTrack(env, instance, 44100, 2); // known format of the test file: 44100 Hz, stereo
    FILE * fp = fopen(path, "rb");
    if (!fp)
    {
        LOGE("localplay fopen error."); // BUGFIX: the old code dereferenced a NULL FILE*
        return;
    }
    const int size = 8192; // known per-frame size of the local test pcm; other sizes work if the sleep below is removed
    unsigned char * buf = new unsigned char[size];
    while (isRunning && feof(fp)==0)
    {
        int len = (int) fread(buf, 1, size, fp);
        if (len <= 0) break;
        // BUGFIX: write only the bytes actually read — the final chunk of the
        // file can be shorter than `size`, and writing `size` played garbage.
        audiotrack_write(env, buf, len);
        usleep(1000*16); // ~16 ms = one frame at 60 fps; without it speech just plays slightly fast
    }

    delete [] buf; // BUGFIX: the buffer used to leak
    fclose(fp);
    env->CallVoidMethod(obj_audiotrack, mid_release); // release the Java AudioTrack
    env->DeleteGlobalRef(obj_audiotrack);
    obj_audiotrack = 0; // clear the cached pointer so a later init cannot double-delete
}

// JNI entry for AudioTrackActivity.stop(): clears the shared run flag so the
// decode/playback loops in play()/localplay() exit on their next iteration.
JNIEXPORT void JNICALL Java_hankin_hjmedia_ff_some_AudioTrackActivity_stop(JNIEnv *env, jobject instance)
{
    isRunning = false;
}

猜你喜欢

转载自blog.csdn.net/huanghuangjin/article/details/82807341