ijkplayer source code analysis: the audio playback process

Preface

This article is the second part of the process analysis. It examines the audio playback process in ijkplayer, which runs in aout_thread, as shown in the following flowchart.
(flowchart image omitted in this transcription)

SDL_Aout structure

SDL_Aout is an abstraction of audio playback; on Android it has two implementations, one based on OpenSL ES and one based on AudioTrack.

// ijksdl_aout.h
typedef struct SDL_Aout_Opaque SDL_Aout_Opaque;  // defined differently by each concrete implementation
typedef struct SDL_Aout SDL_Aout;
// Abstract audio-output interface: a vtable of function pointers that each
// backend (AudioTrack or OpenSL ES) fills in with its own implementation.
struct SDL_Aout {
    
    
    SDL_mutex *mutex;                   // lock protecting this structure
    double     minimal_latency_seconds;

    SDL_Class       *opaque_class;      // class descriptor of the concrete backend
    SDL_Aout_Opaque *opaque;            // backend-private state
    void (*free_l)(SDL_Aout *vout);
    int (*open_audio)(SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained);
    void (*pause_audio)(SDL_Aout *aout, int pause_on);
    void (*flush_audio)(SDL_Aout *aout);
    void (*set_volume)(SDL_Aout *aout, float left, float right);
    void (*close_audio)(SDL_Aout *aout);

    double (*func_get_latency_seconds)(SDL_Aout *aout);
    void   (*func_set_default_latency_seconds)(SDL_Aout *aout, double latency);

    // optional
    void   (*func_set_playback_rate)(SDL_Aout *aout, float playbackRate);
    void   (*func_set_playback_volume)(SDL_Aout *aout, float playbackVolume);
    int    (*func_get_audio_persecond_callbacks)(SDL_Aout *aout);

    // Android only
    int    (*func_get_audio_session_id)(SDL_Aout *aout);
};
  • Implemented as
    ijksdl_aout_android_opensles.c
    ijksdl_aout_android_audiotrack.c

Code flow

1. What method is used to play and what is the entrance

In a separate thread aout_thread, obtain audio decoding data for playback

// Audio playback loop (AudioTrack backend): repeatedly pulls decoded PCM via
// the ffplay callback and writes it to the Android AudioTrack until aborted.
// NOTE(review): elided excerpt — declarations of opaque, userdata, buffer and
// copy_size are omitted from the listing.
static int aout_thread_n(JNIEnv *env, SDL_Aout *aout)
{
    
    
    SDL_Android_AudioTrack *atrack = opaque->atrack; // obtain the AudioTrack instance
    SDL_AudioCallback audio_cblk = opaque->spec.callback; // callback that fetches decoded audio data

    SDL_SetThreadPriority(SDL_THREAD_PRIORITY_HIGH);

    while (!opaque->abort_request) {
    
     // read loop
        SDL_LockMutex(opaque->wakeup_mutex);
    
        if (opaque->need_flush) {
    
            // a flush was requested (e.g. seek): drop buffered audio
            opaque->need_flush = 0;
            SDL_Android_AudioTrack_flush(env, atrack);
        }
        SDL_UnlockMutex(opaque->wakeup_mutex);

        audio_cblk(userdata, buffer, copy_size); // fetch decoded data

        if (opaque->need_flush) {
    
            // flush requested while the callback ran: discard this buffer too
            opaque->need_flush = 0;
            SDL_Android_AudioTrack_flush(env, atrack);
        } else {
    
    
            SDL_Android_AudioTrack_write(env, atrack, buffer, copy_size); // write the data, i.e. play it
        }
    }

    SDL_Android_AudioTrack_free(env, atrack);
    return 0;
}

2. How is aout_thread_n called?

  • Call audio_open in the stream_component_open function to open and create the audio player
    ff_ffplay.c#stream_component_open
    -> audio_open
    -> SDL_AoutOpenAudio // Go to the implementation of SDL_Aout

  • Implementation of ijksdl_aout_android_audiotrack.c
    -> ijksdl_aout_android_audiotrack.c#aout_open_audio
    -> ijksdl_aout_android_audiotrack.c#aout_open_audio_n, which creates aout_thread and then executes aout_thread_n.
    After audio_open returns, the AudioTrack has been created and opened, waiting to read decoded data and play it.

  • Implementation of ijksdl_aout_android_opensles.c
    -> ijksdl_aout_android_opensles.c#aout_open_audio, which creates aout_thread and then executes aout_thread_n.
    After audio_open returns, the SLObjectItf has been created and opened, waiting to read decoded data and play it.

The following uses ijksdl_aout_android_audiotrack.c as an example to sort out the code:

// ff_ffplay.c — called from stream_component_open.
// (excerpt) Builds the desired audio spec and opens the SDL_Aout backend.
static int audio_open(FFPlayer *opaque, int64_t wanted_channel_layout, int wanted_nb_channels,
                      int wanted_sample_rate, struct AudioParams *audio_hw_params) {
    
    
    SDL_AudioSpec wanted_spec, spec;
    wanted_spec.callback = sdl_audio_callback; // install the callback that supplies decoded PCM
    SDL_AoutOpenAudio(ffp->aout, &wanted_spec, &spec); // open the audio output
}

// ijksdl_aout.c
// Dispatcher: forwards to the backend's open_audio function pointer.
// Returns the backend's result, or -1 if aout, desired, or the
// open_audio implementation is missing.
int SDL_AoutOpenAudio(SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained)
{
    
    
    if (aout && desired && aout->open_audio)
        return aout->open_audio(aout, desired, obtained);

    return -1;
}

// ijksdl_aout_android_audiotrack.c
// open_audio implementation for the AudioTrack backend.
// NOTE(review): elided excerpt — acquisition of env (JNIEnv) is omitted.
static int aout_open_audio(SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained) {
    
    
    return aout_open_audio_n(env, aout, desired, obtained);
}

// ijksdl_aout_android_audiotrack.c
// Creates the Java AudioTrack from the desired spec, then starts the
// playback thread (aout_thread) that drives the write loop.
static int aout_open_audio_n(JNIEnv *env, SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained) {
    
    
    SDL_Aout_Opaque *opaque = aout->opaque;
    // create the AudioTrack
    opaque->atrack = SDL_Android_AudioTrack_new_from_sdl_spec(env, desired); 

    // start aout_thread, which runs the playback flow
    opaque->audio_tid = SDL_CreateThreadEx(&opaque->_audio_tid, aout_thread, aout, "ff_aout_android");
} 

// ijksdl_aout_android_audiotrack.c
// Thread entry point for the playback loop.
// NOTE(review): elided excerpt — setup of env and aout from arg is omitted.
static int aout_thread(void *arg) {
    
    
    return aout_thread_n(env, aout);
}

3. Where ffp->aout is assigned, i.e. when SDL_Aout is initialized

  • SDL_Aout initialization process:
    ijkplayer.c#ijkmp_prepare_async
    -> ijkplayer.c#ijkmp_prepare_async_l
    -> ff_ffplay.c#ffp_prepare_async_l
    -> ff_ffpipeline.c#ffpipeline_open_audio_output
    -> ffpipeline_android.c#func_open_audio_output // Create SDL_Aout
// ffpipeline_android.c
// Factory for the audio output backend: selects OpenSL ES or AudioTrack
// based on the ffp->opensles option, then applies the pipeline's stored
// stereo volume to the newly created SDL_Aout.
static SDL_Aout *func_open_audio_output(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    
    
    SDL_Aout *aout = NULL;
    if (ffp->opensles) {
    
        // player option requested the OpenSL ES backend
        aout = SDL_AoutAndroid_CreateForOpenSLES();
    } else {
    
        // default backend: Java AudioTrack
        aout = SDL_AoutAndroid_CreateForAudioTrack();
    }
    if (aout)
        SDL_AoutSetStereoVolume(aout, pipeline->opaque->left_volume, pipeline->opaque->right_volume);
    return aout;
}
  • Initialization assignment ffpipeline process:
    IjkMediaPlayer_native_setup
    -> ijkmp_android_create
    -> ffpipeline_android.c#ffpipeline_create_from_android // Create ffpipeline

4. Audio_cblk specific process

// ff_ffplay.c — 'stream' is the output buffer; this fills it with decoded
// PCM, which aout_thread then writes to the AudioTrack.
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len) {
    
    
    int audio_size, len1;
    while (len > 0) {
    
    
          if (is->audio_buf_index >= is->audio_buf_size) {
    
    
            // audio_buf is exhausted: call audio_decode_frame to refill it
            audio_size = audio_decode_frame(ffp);
            is->audio_buf_size = audio_size;
            is->audio_buf_index = 0;
        }

        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;

        // copy up to len1 bytes of decoded data into stream
        if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME) {
    
            // not muted and at full volume: plain copy
            memcpy(stream, (uint8_t *) is->audio_buf + is->audio_buf_index, len1);
        } else {
    
            // muted or attenuated: write silence, then mix at the requested volume
            memset(stream, 0, len1);
            if (!is->muted && is->audio_buf)
                SDL_MixAudio(stream, (uint8_t *) is->audio_buf + is->audio_buf_index, len1,
                             is->audio_volume);
        }

        // advance; while len > 0, keep reading more data
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

// Pulls the next decoded audio frame from the sample queue, points
// is->audio_buf at its PCM data (resampling first when needed), and updates
// the audio clock. Returns the data size in bytes, or -1 if no frame is
// readable.
static int audio_decode_frame(FFPlayer *ffp) {
    
    
    Frame *af;
    do {
    
    
        // peek a readable node, i.e. the next decoded audio frame
        if (!(af = frame_queue_peek_readable(&is->sampq))) {
    
    
            return -1;
        }
        // advance the queue's read pointer to the next node
        frame_queue_next(&is->sampq);
    } while (af->serial != is->audioq.serial); // discard frames from a stale serial (e.g. after a seek)

    if () {
    
        // NOTE(review): condition elided in this excerpt — presumably taken
        // when the frame format differs from the device format; resample here
    } else {
    
        // formats match: hand this frame's data to audio_buf directly
        is->audio_buf = af->frame->data[0];
        resampled_data_size = data_size;
    }

     // update audio_clock to the PTS at the end of this frame
     is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
     return resampled_data_size;
}

Reference:
ffplay audio output thread analysis
ijkplayer audio output analysis

Guess you like

Origin blog.csdn.net/u014099894/article/details/112970165