NDK Development Notes: FFmpeg Audio/Video Sync, Part 2 (Applying a Memory Pool)

Copyright notice: this is an original post by the author; do not repost without permission. https://blog.csdn.net/a360940265a/article/details/86357801

This post continues from the previous one: we implement the nativePlay method and build a simple AVPacket buffer for efficient storage. Without further ado, here is the code.

JNIEXPORT void JNICALL
Java_org_zzrblog_ffmp_SyncPlayer_nativePlay(JNIEnv *env, jobject instance)
{
    if(mSyncPlayer == NULL) {
        LOGE("%s","请调用函数:nativeInit");
        return;
    }
    if(mSyncPlayer->input_format_ctx == NULL) {
        LOGW("%s","请调用函数:nativePrepare");
        return;
    }

    mSyncPlayer->stop_thread_avpacket_distributor = 0;
    pthread_create(&(mSyncPlayer->thread_avpacket_distributor), NULL, avpacket_distributor, mSyncPlayer);
    usleep(1000); // 1000us = 1ms
    // The point is to give avpacket_distributor a head start over video_avframe_decoder.
    // If this is unclear, study how java.util.concurrent.CountDownLatch is used; the idea carries over.

    mSyncPlayer->stop_thread_video_decoder = 0;
    pthread_create(&(mSyncPlayer->thread_video_decoder), NULL, video_avframe_decoder, mSyncPlayer);

    mSyncPlayer->stop_thread_audio_decoder = 0;
    pthread_create(&(mSyncPlayer->thread_audio_decoder), NULL, audio_avframe_decoder, mSyncPlayer);

    usleep(50000); // 50ms
}

Following the design from earlier, there are three worker threads: the AVPacket distributor (avpacket_distributor), the video decoder (video_avframe_decoder), and the audio decoder (audio_avframe_decoder). Each thread's tid is stored in the SyncPlayer struct so that nativeRelease can join and clean them up; in addition, each thread gets its own int flag indicating whether it has been asked to stop.

// avpacket_distributor: keeps reading AVPackets from the input file and hands them to the corresponding decoder
void* avpacket_distributor(void* arg)
{
    SyncPlayer *player = (SyncPlayer *) arg;
    AVFormatContext *pFormatContext = player->input_format_ctx;

    AVPacket* pkt = av_packet_alloc();
    while (av_read_frame(pFormatContext, pkt) >= 0)
    {
        if(player->stop_thread_avpacket_distributor != 0)
        {
            break;
        }
        if (pkt->stream_index == player->video_stream_index)
        {
            // Store the memory created by av_packet_alloc into the buffer?
            // Or have the buffer manage its own memory and copy the packet into it?
        }
        if (pkt->stream_index == player->audio_stream_index)
        {
            // ...
        }
    }
    av_packet_unref(pkt);
    av_packet_free(&pkt);
    LOGI("thread_avpacket_distributor exit ...\n");
    return 0;
}

Let's deal with the AVPacket distributor first; I want to walk through this part slowly. Following the earlier template code, before the actual av_read_frame loop we allocate a heap AVPacket with av_packet_alloc (or av_new_packet) and hold on to its pointer. Each loop iteration reuses that heap memory for decoding work and then drops the reference with av_packet_unref.
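
For reference, that single-threaded template looks roughly like the following minimal sketch (standard FFmpeg 4.x packet API; the variable names simply mirror this article):

// Minimal single-threaded read loop with one reusable heap AVPacket (for comparison only).
AVPacket *pkt = av_packet_alloc();        // one heap packet, reused every iteration
while (av_read_frame(pFormatContext, pkt) >= 0) {
    // ... hand pkt to the matching decoder here ...
    av_packet_unref(pkt);                 // drop the reference so the packet can be reused
}
av_packet_free(&pkt);                     // finally release the AVPacket itself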

But is this still a good idea in a multi-threaded setup? Heap memory created in one thread gets pushed into a buffer and then handed to another thread to use? That already sounds painful. Can we take a different approach: create a buffer memory pool shared by the threads, have the packet-reading thread write data into the pool and mark it, and have the decoding threads read the corresponding AVPacket and clear the mark? Following this idea I designed a simple AVPacket_buffer, declared in AVPacket_buffer.h and implemented in AVPacket_buffer.c. The code is as follows:

//
// Created by nicky on 2019/1/9.
//
#pragma once
#ifndef BLOGAPP_AVPACKET_BUFFER_H
#define BLOGAPP_AVPACKET_BUFFER_H

#define BUFFER_SIZE 100

#include "include/libavcodec/avcodec.h"

typedef struct _AVPacket_buffer{
    // number of slots in the buffer
    int size;

    // array of AVPacket pointers, size elements in total
    AVPacket * * avpacket_ptr_array;

    // writes and reads proceed in order, one slot after another
    int write_current_position;
    int read_current_position;

    // reserved for future extension
    void * reserve;

} AVPacket_buffer, AV_PACKET_BUFFER;

// create the AVPacket buffer
AV_PACKET_BUFFER* alloc_avpacket_buffer(int size);
// free the AVPacket buffer
void free_avpacket_buffer(AV_PACKET_BUFFER* pAVPacketBuffer);
// get the next slot to write an AVPacket into
AVPacket* get_write_packet(AV_PACKET_BUFFER * pAVPacketBuffer);
// get the next slot to read an AVPacket from
AVPacket* get_read_packet(AV_PACKET_BUFFER * pAVPacketBuffer);

#endif //BLOGAPP_AVPACKET_BUFFER_H

//
// Created by nicky on 2019/1/9.
//
#include <stdlib.h>          // malloc / calloc / free
#include "AVPacket_buffer.h"

// create the AVPacket buffer: allocate the struct plus an array of pre-allocated AVPackets
AV_PACKET_BUFFER* alloc_avpacket_buffer(int size)
{
    AV_PACKET_BUFFER* pAVPacketBuffer = (AV_PACKET_BUFFER*)malloc(sizeof(AV_PACKET_BUFFER));
    pAVPacketBuffer->size = size;
    pAVPacketBuffer->write_current_position = 0;
    pAVPacketBuffer->read_current_position = 0;
    // allocate the pointer array
    pAVPacketBuffer->avpacket_ptr_array = calloc((size_t) size, sizeof(AVPacket*));
    int i;
    for(i=0; i<size; i++){
        pAVPacketBuffer->avpacket_ptr_array[i] = av_packet_alloc();
        // must be freed using av_packet_free().
    }
    return pAVPacketBuffer;
}

// free the AVPacket buffer and every pre-allocated packet
void free_avpacket_buffer(AV_PACKET_BUFFER* pAVPacketBuffer)
{
    for(int i=0; i<pAVPacketBuffer->size; i++){
        // void av_packet_free(AVPacket **pkt);
        av_packet_free(& (pAVPacketBuffer->avpacket_ptr_array[i]) );
    }
    free(pAVPacketBuffer->avpacket_ptr_array);
    free(pAVPacketBuffer);
}

// compute the next index, wrapping around at size
int get_next(AV_PACKET_BUFFER * pAVPacketBuffer, int current){
    return (current + 1) % pAVPacketBuffer->size;
}

// return the slot at the current write position and advance it
AVPacket* get_write_packet(AV_PACKET_BUFFER * pAVPacketBuffer)
{
    int current = pAVPacketBuffer->write_current_position;
    pAVPacketBuffer->write_current_position = get_next(pAVPacketBuffer, current);
    return pAVPacketBuffer->avpacket_ptr_array[current];
}

// return the slot at the current read position and advance it
AVPacket* get_read_packet(AV_PACKET_BUFFER * pAVPacketBuffer)
{
    int current = pAVPacketBuffer->read_current_position;
    pAVPacketBuffer->read_current_position = get_next(pAVPacketBuffer, current);
    return pAVPacketBuffer->avpacket_ptr_array[current];
}

The members of AV_PACKET_BUFFER are easy to understand. size is the capacity of the buffer: based on its value we allocate a heap array of size elements, each of sizeof(AVPacket*), to hold AVPacket pointers, and we also allocate an AVPacket for each of those pointers with av_packet_alloc. Remember to release the memory you allocate! Remember to release the memory you allocate! Remember to release the memory you allocate!

Then the write and read positions, write_current_position and read_current_position, are initialized; both reads and writes wrap around within the range of size. Some readers will object that wrapping around like this could overwrite data. True, but with a buffer of reasonable size and normal read/write pacing the overwrite does not occur, so for now this situation is easy enough to avoid.
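
To make the intended usage concrete, here is a minimal single-threaded sketch of the API defined above (no locking, just showing how the write and read positions march through the same pre-allocated slots):

AV_PACKET_BUFFER *buf = alloc_avpacket_buffer(4);  // 4 pre-allocated AVPacket slots

// producer side: each call returns the slot at write_current_position, then advances it 0,1,2,3,0,1,...
AVPacket *wr = get_write_packet(buf);

// consumer side: each call returns the slot at read_current_position, in the same wrap-around order
AVPacket *rd = get_read_packet(buf);

free_avpacket_buffer(buf);   // av_packet_free() every slot, then free the array and the struct

As long as the writer stays no more than size slots ahead of the reader, nothing gets overwritten; nothing in the code enforces that yet, which is exactly the buffer BUG mentioned at the end of this post.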

Good, the buffer is ready. Back in nativePlay, we add some preparation work before the three worker threads start.

typedef struct _SyncPlayer {
    // ... ...
    pthread_t thread_avpacket_distributor;
    int stop_thread_avpacket_distributor;   // 1 = stop requested, 0 = keep working

    pthread_t thread_video_decoder;
    int stop_thread_video_decoder;
    AV_PACKET_BUFFER* video_avpacket_buffer;

    pthread_t thread_audio_decoder;
    int stop_thread_audio_decoder;
    AV_PACKET_BUFFER* audio_avpacket_buffer;
} SyncPlayer;


JNIEXPORT void JNICALL
Java_org_zzrblog_ffmp_SyncPlayer_nativePlay(JNIEnv *env, jobject instance)
{
    // ...
    initPlayerAVPacketBuffer(mSyncPlayer);

    mSyncPlayer->stop_thread_avpacket_distributor = 0;
    pthread_create(&(mSyncPlayer->thread_avpacket_distributor), NULL, avpacket_distributor, mSyncPlayer);
    usleep(1000); // 1000us = 1ms

    mSyncPlayer->stop_thread_video_decoder = 0;
    pthread_create(&(mSyncPlayer->thread_video_decoder), NULL, video_avframe_decoder, mSyncPlayer);

    mSyncPlayer->stop_thread_audio_decoder = 0;
    pthread_create(&(mSyncPlayer->thread_audio_decoder), NULL, audio_avframe_decoder, mSyncPlayer);
}

void initPlayerAVPacketBuffer(SyncPlayer* player)
{
    // AVPacket buffer for video
    if(player->video_avpacket_buffer != NULL) {
        free_avpacket_buffer(player->video_avpacket_buffer);
        player->video_avpacket_buffer = NULL; // avoid a dangling pointer
    }
    player->video_avpacket_buffer = alloc_avpacket_buffer(100);
    // AVPacket buffer for audio
    if(player->audio_avpacket_buffer != NULL) {
        free_avpacket_buffer(player->audio_avpacket_buffer);
        player->audio_avpacket_buffer = NULL; // avoid a dangling pointer
    }
    player->audio_avpacket_buffer = alloc_avpacket_buffer(100);
}

Everything is moving along as planned. Back in avpacket_distributor, the AVPackets read by av_read_frame are now written into the buffer, which brings us to the next key point.

// avpacket_distributor: keeps reading AVPackets from the input file and hands them to the corresponding decoder
void* avpacket_distributor(void* arg)
{
    SyncPlayer *player = (SyncPlayer *) arg;
    AVFormatContext *pFormatContext = player->input_format_ctx;
    //AVPacket* packet = av_packet_alloc();
    // No heap AVPacket this time: copying heap memory created in this thread into the custom AVPacket_buffer would not be efficient.
    AVPacket packet;         // stack-allocated packet
    AVPacket *pkt = &packet; // pointer to the stack packet

    while (av_read_frame(pFormatContext, pkt) >= 0)
    {
        if(player->stop_thread_avpacket_distributor != 0)
            break;
        if (pkt->stream_index == player->video_stream_index)
        {
            AV_PACKET_BUFFER *video_buffer = player->video_avpacket_buffer;
            AVPacket *video_avpacket_buffer_data = get_write_packet(video_buffer);
            //the buffer's internal heap slot = the current stack packet (indirect copy through the dereference)
            *video_avpacket_buffer_data = packet;
            //memcpy(video_avpacket_buffer_data, &packet, sizeof(packet));
        }
        if (pkt->stream_index == player->audio_stream_index)
        {
            AV_PACKET_BUFFER *audio_buffer = player->audio_avpacket_buffer;
            AVPacket *audio_avpacket_buffer_data = get_write_packet(audio_buffer);
            //the buffer's internal heap slot = the current stack packet (indirect copy through the dereference)
            *audio_avpacket_buffer_data = packet;
        }
    }
    //av_packet_unref(&packet);
    // No unref here; each decoder thread unrefs the packet after it has used it.
    LOGI("thread_avpacket_distributor exit ...\n");
    return 0;
}

Note! This time the AVPacket that av_read_frame extracts from the format context no longer comes from av_packet_alloc on the heap; it sits directly on the thread's stack, with a pointer variable pointing at that stack address. We then get a write pointer from the buffer and assign through the dereference, (*p) = packet, which copies the stack packet into the buffer's heap memory, an indirect copy that improves efficiency.

Make sure the distinction is clear: there is the stack object AVPacket packet;, and there is the write pointer obtained from the buffer, AVPacket *video_avpacket_buffer_data = get_write_packet(AV_PACKET_BUFFER), which points at heap memory allocated earlier. Only a dereferenced assignment (*p) actually writes into that heap block. Do not drop the dereference: reassigning the pointer itself, as in video_avpacket_buffer_data = &packet;, leaves the buffer's heap slot without any data at all.
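
A tiny illustration of that distinction (illustration only; video_buffer stands for the player's video buffer, as in the distributor code above):

AVPacket packet;                                   // stack packet filled by av_read_frame
AVPacket *dst = get_write_packet(video_buffer);    // points at a pre-allocated heap AVPacket inside the buffer

*dst = packet;     // correct: copies the whole AVPacket struct into the buffer's heap slot
// dst = &packet;  // wrong: only redirects the local pointer, the heap slot never receives any data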

The AVPacket distributor is basically done. What remains are the video decoder thread and the audio decoder thread. The code is as follows:

void* video_avframe_decoder(void* arg)
{
    SyncPlayer* player = (SyncPlayer*)arg;
    AVCodecContext* videoCodecCtx = player->input_codec_ctx[player->video_stream_index];
    AV_PACKET_BUFFER* videoAVPacketButter = player->video_avpacket_buffer;

    AVFrame *yuv_frame = av_frame_alloc();
    AVFrame *rgb_frame = av_frame_alloc();

    // prepare the native window for rendering
    ANativeWindow* nativeWindow = player->native_window;
    // set the buffer geometry (width, height, pixel format)
    ANativeWindow_setBuffersGeometry(nativeWindow, videoCodecCtx->width, videoCodecCtx->height, WINDOW_FORMAT_RGBA_8888);
    // buffer used while drawing
    ANativeWindow_Buffer nativeWinBuffer;
    int ret;
    while(player->stop_thread_video_decoder == 0)
    {
        AVPacket* packet = get_read_packet(videoAVPacketButter);
        //AVPacket->AVFrame
        ret = avcodec_send_packet(videoCodecCtx, packet);
        if (ret == AVERROR_EOF){
            av_packet_unref(packet);
            LOGW("video_decoder avcodec_send_packet:%d\n", ret);
            break;
        }else if(ret < 0){
            av_packet_unref(packet);
            LOGE("video_decoder avcodec_send_packet:%d\n", ret);
            continue;
        }

        while(ret >= 0)
        {
            ret = avcodec_receive_frame(videoCodecCtx, yuv_frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
                LOGD("video_decoder avcodec_receive_frame:%d\n", ret);
                break;
            }else if (ret < 0) {
                LOGW("video_decoder avcodec_receive_frame:%d\n", AVERROR(ret));
                goto end;  //end处进行资源释放等善后处理
            }

            if (ret >= 0)
            {
                ANativeWindow_lock(nativeWindow, &nativeWinBuffer, NULL);
                // lock and bind ANativeWindow + ANativeWindow_Buffer
                av_image_fill_arrays(rgb_frame->data, rgb_frame->linesize, nativeWinBuffer.bits,
                                     AV_PIX_FMT_RGBA, videoCodecCtx->width, videoCodecCtx->height, 1 );
                // point the rgb AVFrame at the ANativeWindow_Buffer's actual bits
                I420ToARGB(yuv_frame->data[0], yuv_frame->linesize[0],
                           yuv_frame->data[2], yuv_frame->linesize[2],
                           yuv_frame->data[1], yuv_frame->linesize[1],
                           rgb_frame->data[0], rgb_frame->linesize[0],
                           videoCodecCtx->width, videoCodecCtx->height);
                // convert the yuv AVFrame into the rgb AVFrame
                ANativeWindow_unlockAndPost(nativeWindow);
                // unlock and post (swap) the buffer onto the screen
            }
        }
        // drop the reference on the packet
        av_packet_unref(packet);
    }

end:
    av_frame_free(&yuv_frame);
    av_frame_free(&rgb_frame);
    LOGI("thread_video_avframe_decoder exit ...\n");
    return 0;
}
void* audio_avframe_decoder(void* arg)
{
    JNIEnv *env = NULL;
    if ( (*gJavaVM)->AttachCurrentThread(gJavaVM, &env,NULL) != JNI_OK) {
        LOGE("gJavaVM->Env Error!\n");
        pthread_exit((void *) -1);
    }

    SyncPlayer* player = (SyncPlayer*)arg;
    AVCodecContext* audioCodecCtx = player->input_codec_ctx[player->audio_stream_index];
    AV_PACKET_BUFFER* audioAVPacketButter = player->audio_avpacket_buffer;

    AVFrame *frame = av_frame_alloc();
    // actual buffer for the converted 16-bit 44100Hz PCM data
    uint8_t *out_buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE);
    // AudioTrack.play
    (*env)->CallVoidMethod(env, player->audio_track, player->audio_track_play_mid);

    int ret;
    while(player->stop_thread_audio_decoder == 0)
    {
        AVPacket* packet = get_read_packet(audioAVPacketButter);
        //AVPacket->AVFrame
        ret = avcodec_send_packet(audioCodecCtx, packet);
        if (ret == AVERROR_EOF){
            av_packet_unref(packet);
            LOGW("audio_decoder avcodec_send_packet:%d\n", ret);
            break;
        }else if(ret < 0){
            av_packet_unref(packet);
            LOGE("audio_decoder avcodec_send_packet:%d\n", ret);
            continue;
        }

        while(ret >= 0)
        {
            ret = avcodec_receive_frame(audioCodecCtx, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                LOGD("audio_decoder avcodec_receive_frame:%d\n", ret);
                break;
            } else if (ret < 0) {
                LOGW("audio_decoder avcodec_receive_frame:%d\n", AVERROR(ret));
                goto end;  //end处进行资源释放等善后处理
            }
            if (ret >= 0)
            {
                swr_convert(player->swr_ctx, &out_buffer, MAX_AUDIO_FRAME_SIZE, (const uint8_t **) frame->data, frame->nb_samples);
                // get the size in bytes of the converted samples
                int out_buffer_size = av_samples_get_buffer_size(NULL, player->out_channel_nb,
                                                                 frame->nb_samples, player->out_sample_fmt, 1);
                //AudioTrack.write(byte[], int, int) needs a byte array, i.e. a jbyteArray on the JNI side,
                //so the out_buffer data has to be copied into a byte array
                jbyteArray audio_data_byteArray = (*env)->NewByteArray(env, out_buffer_size);
                jbyte* fp_AudioDataArray = (*env)->GetByteArrayElements(env, audio_data_byteArray, NULL);
                memcpy(fp_AudioDataArray, out_buffer, (size_t) out_buffer_size);
                (*env)->ReleaseByteArrayElements(env, audio_data_byteArray, fp_AudioDataArray,0);
                // AudioTrack.write the PCM data
                (*env)->CallIntMethod(env,player->audio_track,player->audio_track_write_mid,
                                      audio_data_byteArray, 0, out_buffer_size);
                // !!! delete the local reference, otherwise the local reference table overflows
                (*env)->DeleteLocalRef(env,audio_data_byteArray);
                usleep(1000 * 16);
            }
        }

        av_packet_unref(packet);
    }

end:
    av_free(out_buffer);
    av_frame_free(&frame);
    LOGI("thread_audio_avframe_decoder exit ...\n");
    (*gJavaVM)->DetachCurrentThread(gJavaVM);
    return 0;
}

The flow above is extracted from the earlier examples. The JavaVM object used by the audio thread is obtained in JNI_OnLoad; I will not paste the code for such small details here.
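
For completeness, caching the JavaVM usually looks roughly like this minimal sketch (the gJavaVM name follows its usage above; the project's actual JNI_OnLoad may differ):

#include <jni.h>

JavaVM *gJavaVM = NULL;

JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM *vm, void *reserved)
{
    gJavaVM = vm;            // save the JavaVM so worker threads can AttachCurrentThread later
    return JNI_VERSION_1_6;  // the JNI version this library requires
}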

Do not forget to release the resources! Do not forget to release the resources! Do not forget to release the resources!

JNIEXPORT void JNICALL
Java_org_zzrblog_ffmp_SyncPlayer_nativeRelease(JNIEnv *env, jobject instance)
{
    if(mSyncPlayer == NULL)
        return;
    if(mSyncPlayer->input_format_ctx == NULL){
        return;
    }
    // ask the worker threads to stop, then join them
    mSyncPlayer->stop_thread_avpacket_distributor = 1;
    pthread_join(mSyncPlayer->thread_avpacket_distributor, NULL);
    mSyncPlayer->stop_thread_video_decoder = 1;
    pthread_join(mSyncPlayer->thread_video_decoder, NULL);
    mSyncPlayer->stop_thread_audio_decoder = 1;
    pthread_join(mSyncPlayer->thread_audio_decoder, NULL);
    // release audio-related resources
    (*env)->DeleteGlobalRef(env, mSyncPlayer->audio_track);
    swr_free(&(mSyncPlayer->swr_ctx));
    // release the decoders
    for(int i=0; i<mSyncPlayer->num_streams; i++) {
        // an entry may be NULL: only the audio and video AVCodecContexts were stored (subtitle streams
        // were not handled), but the array was still allocated with num_streams slots
        AVCodecContext * pCodecContext = mSyncPlayer->input_codec_ctx[i];
        if(pCodecContext != NULL)
        {
            avcodec_close(pCodecContext);
            avcodec_free_context(&pCodecContext);
            mSyncPlayer->input_codec_ctx[i] = NULL; // avoid a dangling pointer in the array
        }
    }
    free(mSyncPlayer->input_codec_ctx);
    // release the AVPacket buffers
    free_avpacket_buffer(mSyncPlayer->audio_avpacket_buffer);
    free_avpacket_buffer(mSyncPlayer->video_avpacket_buffer);
    // release the input format context (avformat_close_input frees it and resets the pointer to NULL)
    avformat_close_input(&(mSyncPlayer->input_format_ctx));
    // release the SyncPlayer itself
    (*env)->DeleteGlobalRef(env, mSyncPlayer->jinstance);
    free(mSyncPlayer);
    mSyncPlayer = NULL;
}

We have now reworked the entire audio/video decode-and-playback flow, using threads to make it non-blocking. Go ahead and run the code. The result will definitely look strange, because the buffer still has a BUG, and we have not implemented synchronization yet! When you have time, revisit the CameraRecordEncoderCore2 I discussed in part (1) and think about how we could synchronize here with FFmpeg.
