Android Recording, Part 2: AudioRecord Source Code Analysis

I. Overview

The previous post covered how AudioRecord is used. This post walks through the AudioRecord source code.

Note: the native-layer source analyzed here is from Android 5.1.0.

1. What we will analyze

The analysis follows the methods used in the previous post; a minimal usage sketch tying them together follows the list. We will mainly look at:

  • getMinBufferSize: get the minimum buffer size required by an AudioRecord object
  • new AudioRecord: create an AudioRecord object
  • startRecording: start recording
  • read: read the captured data
  • stop: stop recording
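As a quick reminder of how these fit together, here is a minimal, hedged sketch of a typical capture pass (the sample rate, format, and the single blocking read are illustrative choices; the RECORD_AUDIO permission is assumed to be granted and minBuf is assumed to be positive):

import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;

public class RecorderSketch {
    public static void captureOnce() {
        int sampleRate = 44100;
        int channel = AudioFormat.CHANNEL_IN_MONO;
        int encoding = AudioFormat.ENCODING_PCM_16BIT;

        // 1. getMinBufferSize: the smallest workable buffer for these parameters
        int minBuf = AudioRecord.getMinBufferSize(sampleRate, channel, encoding);

        // 2. new AudioRecord: construct the recorder (requires RECORD_AUDIO)
        AudioRecord recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
                sampleRate, channel, encoding, minBuf);

        // 3. startRecording / 4. read / 5. stop
        byte[] data = new byte[minBuf];
        recorder.startRecording();
        int n = recorder.read(data, 0, data.length); // blocking read
        recorder.stop();
        recorder.release();
    }
}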

2. Background

The Android stack has three main layers:

  • bottom layer: the Linux kernel
  • middle layer: implemented mostly in C++
  • application layer: applications written mostly in Java

The application layer and the middle layer are connected through JNI,

so the rough flow is: an operation in a Java app –> a JNI call into the middle layer –> if the middle layer needs hardware or kernel support it calls down into the bottom layer, otherwise it finishes the work and returns the result to the application layer.

Building the audio path involves Android Binder concepts.
For details, see my earlier post: Android IPC 4: Binder Part 1, Overview and the Binder Driver.

II. getMinBufferSize

getMinBufferSize returns the minimum buffer size required by an AudioRecord object.

1. AR.getMinBufferSize (Java)

 static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
        int channelCount = 0;
        switch (channelConfig) {
        // mono
        case AudioFormat.CHANNEL_IN_DEFAULT: // AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_IN_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            channelCount = 1;
            break;
        // stereo
        case AudioFormat.CHANNEL_IN_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
        case (AudioFormat.CHANNEL_IN_FRONT | AudioFormat.CHANNEL_IN_BACK):
            channelCount = 2;
            break;
        case AudioFormat.CHANNEL_INVALID:
        default:
            loge("getMinBufferSize(): Invalid channel configuration.");
            return ERROR_BAD_VALUE;
        }

        // calls the JNI function native_get_min_buff_size
        int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
        if (size == 0) {
            return ERROR_BAD_VALUE;
        }
        else if (size == -1) {
            return ERROR;
        }
        else {
            return size;
        }
    }
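Callers are expected to check the sentinel return values before using the result; a minimal, hedged check might look like this:

int size = AudioRecord.getMinBufferSize(44100,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
if (size == AudioRecord.ERROR_BAD_VALUE || size == AudioRecord.ERROR) {
    // the parameter combination is unsupported, or the query failed
    throw new IllegalArgumentException("unsupported recording parameters");
}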

2. AR.getMinBufferSize (C++)

Looking at the source, native_get_min_buff_size is implemented by a JNI function in frameworks/base/core/jni/android_media_AudioRecord.cpp.

The mapping from native_get_min_buff_size to that C++ function is recorded in the JNI method table in android_media_AudioRecord.cpp:

Path: frameworks/base/core/jni/android_media_AudioRecord.cpp

static const JNINativeMethod gMethods[] = {
    // name,               signature,  funcPtr
    {"native_start",         "(II)I",    (void *)android_media_AudioRecord_start},
    {"native_stop",          "()V",    (void *)android_media_AudioRecord_stop},
    {"native_setup",         "(Ljava/lang/Object;Ljava/lang/Object;[IIIII[ILjava/lang/String;J)I",
                                      (void *)android_media_AudioRecord_setup},
    {"native_finalize",      "()V",    (void *)android_media_AudioRecord_finalize},
    {"native_release",       "()V",    (void *)android_media_AudioRecord_release},
    {"native_read_in_byte_array",
                             "([BIIZ)I",
                                     (void *)android_media_AudioRecord_readInArray<jbyteArray>},
    {"native_read_in_short_array",
                             "([SIIZ)I",
                                     (void *)android_media_AudioRecord_readInArray<jshortArray>},
    {"native_read_in_float_array",
                             "([FIIZ)I",
                                     (void *)android_media_AudioRecord_readInArray<jfloatArray>},
    {"native_read_in_direct_buffer","(Ljava/lang/Object;IZ)I",
                                       (void *)android_media_AudioRecord_readInDirectBuffer},
    {"native_get_buffer_size_in_frames",
                             "()I", (void *)android_media_AudioRecord_get_buffer_size_in_frames},
    {"native_set_marker_pos","(I)I",   (void *)android_media_AudioRecord_set_marker_pos},
    {"native_get_marker_pos","()I",    (void *)android_media_AudioRecord_get_marker_pos},
    {"native_set_pos_update_period",
                             "(I)I",   (void *)android_media_AudioRecord_set_pos_update_period},
    {"native_get_pos_update_period",
                             "()I",    (void *)android_media_AudioRecord_get_pos_update_period},
//native_get_min_buff_size-->android_media_AudioRecord_get_min_buff_size
    {"native_get_min_buff_size",
                             "(III)I",   (void *)android_media_AudioRecord_get_min_buff_size},
    {"native_setInputDevice", "(I)Z", (void *)android_media_AudioRecord_setInputDevice},
    {"native_getRoutedDeviceId", "()I", (void *)android_media_AudioRecord_getRoutedDeviceId},
    {"native_enableDeviceCallback", "()V", (void *)android_media_AudioRecord_enableDeviceCallback},
    {"native_disableDeviceCallback", "()V",
                                        (void *)android_media_AudioRecord_disableDeviceCallback},
    {"native_get_timestamp", "(Landroid/media/AudioTimestamp;I)I",
                                       (void *)android_media_AudioRecord_get_timestamp},
};

Path: frameworks/base/core/jni/android_media_AudioRecord.cpp

static jint android_media_AudioRecord_get_min_buff_size(JNIEnv *env,  jobject thiz,
    jint sampleRateInHertz, jint channelCount, jint audioFormat) {

    ALOGV(">> android_media_AudioRecord_get_min_buff_size(%d, %d, %d)",
          sampleRateInHertz, channelCount, audioFormat);

    size_t frameCount = 0;
    audio_format_t format = audioFormatToNative(audioFormat);
    // frameCount is returned through the pointer argument
    status_t result = AudioRecord::getMinFrameCount(&frameCount,
            sampleRateInHertz,
            format,
            audio_channel_in_mask_from_count(channelCount));

    if (result == BAD_VALUE) {
        return 0;
    }
    if (result != NO_ERROR) {
        return -1;
    }
    return frameCount * channelCount * audio_bytes_per_sample(format);
}

The buffer size is computed from the minimum frameCount obtained from AudioRecord::getMinFrameCount, i.e. the smallest frame count the audio path can work with.
A frame is a common unit in audio: one sample for each channel, so a frame occupies (bytes per sample) * (channel count) bytes.
Hence: minimum buffer = frame count * channel count * bytes per sample.
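As a worked example (the frame count below is purely illustrative; the real value comes from the audio HAL through getMinFrameCount):

public class MinBufferMath {
    public static void main(String[] args) {
        int frameCount = 1024;     // assumed minimum frame count (illustrative)
        int channelCount = 1;      // mono
        int bytesPerSample = 2;    // 16-bit PCM
        int minBufferSize = frameCount * channelCount * bytesPerSample;
        System.out.println(minBufferSize); // 2048 bytes
    }
}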

III. new AudioRecord

1. AR.new AudioRecord (Java)

public AudioRecord(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes)
    throws IllegalArgumentException {
        this((new AudioAttributes.Builder())
                    .setInternalCapturePreset(audioSource)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(getChannelMaskFromLegacyConfig(channelConfig,
                                        true/*allow legacy configurations*/))
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                AudioManager.AUDIO_SESSION_ID_GENERATE);
    }

As you can see, this delegates to another constructor.
The code below omits some details.

public AudioRecord(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int sessionId) throws IllegalArgumentException {
        mRecordingState = RECORDSTATE_STOPPED;

        // remember which looper is associated with the AudioRecord instanciation
        if ((mInitializationLooper = Looper.myLooper()) == null) {
            mInitializationLooper = Looper.getMainLooper();
        }

       ..... // initialization of the various fields

    
         // call native_setup in the native layer, passing a WeakReference to this object
        //TODO: update native initialization when information about hardware init failure
        //      due to capture device already open is available.
        int initResult = native_setup( new WeakReference<AudioRecord>(this),
                mAudioAttributes, sampleRate, mChannelMask, mChannelIndexMask,
                mAudioFormat, mNativeBufferSizeInBytes,
                session, ActivityThread.currentOpPackageName(), 0 /*nativeRecordInJavaObj*/);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing native AudioRecord object.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        mState = STATE_INITIALIZED;
    }
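Because the constructor returns normally even when native_setup fails (leaving mState at STATE_UNINITIALIZED), callers usually verify the state afterwards; a small, hedged sketch:

AudioRecord recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
        44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBuf);
if (recorder.getState() != AudioRecord.STATE_INITIALIZED) {
    // native_setup failed; mState was never set to STATE_INITIALIZED
    recorder.release();
    throw new IllegalStateException("AudioRecord initialization failed");
}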

2. AR.android_media_AudioRecord_setup (C++)

Path: frameworks/base/core/jni/android_media_AudioRecord.cpp

android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jobject jaa, jint sampleRateInHertz, jint channelMask,
                // Java channel masks map directly to the native definition
        jint audioFormat, jint buffSizeInBytes, jintArray jSession)
{
  ...
  

    // create an uninitialized AudioRecord object
    sp<AudioRecord> lpRecorder = new AudioRecord();

   ...
    const status_t status = lpRecorder->set(paa->source,
        sampleRateInHertz,
        format,        // word length, PCM
        channelMask,
        frameCount,
        recorderCallback,// callback_t
        lpCallbackData,// void* user
        0,             // notificationFrames,
        true,          // threadCanCallJava
        sessionId,
        AudioRecord::TRANSFER_DEFAULT,
        flags,
        paa);

    if (status != NO_ERROR) {
        // initialization check failed
        ALOGE("Error creating AudioRecord instance: initialization check failed with status %d.",
                status);
        goto native_init_failure;
    }

   ...

 
    // save our newly created C++ AudioRecord in the "nativeRecorderInJavaObj" field
    // of the Java object
    setAudioRecord(env, thiz, lpRecorder);

    // save our newly created callback information in the "nativeCallbackCookie" field
    // of the Java object (in mNativeCallbackCookie) so we can free the memory in finalize()
	env->SetLongField(thiz, javaAudioRecordFields.nativeCallbackCookie, (jlong)lpCallbackData);

    return (jint) AUDIO_JAVA_SUCCESS;

    // failure:
native_init_failure:
    env->DeleteGlobalRef(lpCallbackData->audioRecord_class);
    env->DeleteGlobalRef(lpCallbackData->audioRecord_ref);
    delete lpCallbackData;
    env->SetLongField(thiz, javaAudioRecordFields.nativeCallbackCookie, 0);

    return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
}

Most of the initialization happens in this function. The core pieces are:

  • lpRecorder->set
  • storing the newly created native AudioRecord in the Java object ("nativeRecorderInJavaObj"), from which it is later retrieved via getAudioRecord
  • storing the callback information in the Java object's "nativeCallbackCookie" field

3. AR.set (C++)

AudioRecord as of Android 5.1.
Path: frameworks/av/media/libmedia/AudioRecord.cpp

status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        audio_input_flags_t flags,
        const audio_attributes_t* pAttributes)
{
    ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "notificationFrames %u, sessionId %d, transferType %d, flags %#x",
          inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
          sessionId, transferType, flags);

   ....
   

    mNotificationFramesReq = notificationFrames;
    // mNotificationFramesAct is initialized in openRecord_l
  

    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = AudioSystem::newAudioUniqueId();
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
    }

    // create the IAudioRecord
    status_t status = openRecord_l(0 /*epoch*/);

    if (status != NO_ERROR) {
        if (mAudioRecordThread != 0) {
            mAudioRecordThread->requestExit();   // see comment in AudioRecord.h
            mAudioRecordThread->requestExitAndWait();
            mAudioRecordThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mActive = false;
    mUserData = user;
    // TODO: add audio hardware input latency here
    mLatency = (1000*mFrameCount) / sampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, -1);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;

    return NO_ERROR;
}

Let's keep following openRecord_l.
Path: frameworks/av/media/libmedia/AudioRecord.cpp

// must be called with mLock held
status_t AudioRecord::openRecord_l(size_t epoch)
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
   ...
    audio_io_handle_t input;
    status = AudioSystem::getInputForAttr(&mAttributes, &input, (audio_session_t)mSessionId,
                                        mSampleRate, mFormat, mChannelMask, mFlags);

    if (status != NO_ERROR) {
        ALOGE("Could not get audio input for record source %d, sample rate %u, format %#x, "
              "channel mask %#x, session %d, flags %#x",
              mAttributes.source, mSampleRate, mFormat, mChannelMask, mSessionId, mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    size_t frameCount = mReqFrameCount;
    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                // but we will still need the original value also
    int originalSessionId = mSessionId;

    // The notification frame count is the period between callbacks, as suggested by the server.
	size_t notificationFrames = mNotificationFramesReq;

    sp<IMemory> iMem;           // for cblk
    sp<IMemory> bufferMem;
    sp<IAudioRecord> record = audioFlinger->openRecord(input,
                                                       mSampleRate, mFormat,
                                                       mChannelMask,
                                                       &temp,
                                                       &trackFlags,
                                                       tid,
                                                       &mSessionId,
                                                       &notificationFrames,
                                                       iMem,
                                                       bufferMem,
                                                       &status);
    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
            "session ID changed from %d to %d", originalSessionId, mSessionId);

  ...

 

    // Starting address of buffers in shared memory.
    // The buffers are either immediately after the control block,
    // or in a separate area at discretion of server.
    void *buffers;
    if (bufferMem == 0) {
        buffers = cblk + 1;
    } else {
        buffers = bufferMem->pointer();
        if (buffers == NULL) {
            ALOGE("Could not get buffer pointer");
            return NO_INIT;
        }
    }

    // invariant that mAudioRecord != 0 is true only after set() returns successfully
    if (mAudioRecord != 0) {
        mAudioRecord->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioRecord = record;
    mCblkMemory = iMem;
    mBufferMemory = bufferMem;
    IPCThreadState::self()->flushCommands();

    mCblk = cblk;
    // note that temp is the (possibly revised) value of frameCount
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
    }
    frameCount = temp;

    mAwaitBoost = false;
    if (mFlags & AUDIO_INPUT_FLAG_FAST) {
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu", frameCount);
            mAwaitBoost = true;
        } else {
            ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
            // once denied, do not request again if IAudioRecord is re-created
            mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
        }
    }

    // Make sure that application is notified with sufficient margin before overrun
    if (notificationFrames == 0 || notificationFrames > frameCount) {
        ALOGW("Received notificationFrames %zu for frameCount %zu", notificationFrames, frameCount);
    }
    mNotificationFramesAct = notificationFrames;

    // We retain a copy of the I/O handle, but don't own the reference
    mInput = input;
    mRefreshRemaining = true;

    mFrameCount = frameCount;
    // If IAudioRecord is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // update proxy
    mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mFrameSize);
    mProxy->setEpoch(epoch);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    mAudioRecord->asBinder()->linkToDeath(mDeathNotifier, this);

    return NO_ERROR;
    }

release:
    AudioSystem::releaseInput(input, (audio_session_t)mSessionId);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}

The most important lines:

  • audioFlinger = AudioSystem::get_audio_flinger();
  • record = audioFlinger->openRecord(...)
  • mAudioRecord = record;

Below we analyze AudioSystem::get_audio_flinger() and audioFlinger->openRecord.

4. AS.get_audio_flinger (C++)

Path: frameworks/av/media/libmedia/AudioSystem.cpp

const sp<IAudioFlinger> AudioSystem::get_audio_flinger()
{
    sp<IAudioFlinger> af;
    sp<AudioFlingerClient> afc;
    {
        Mutex::Autolock _l(gLock);
        if (gAudioFlinger == 0) {
            sp<IServiceManager> sm = defaultServiceManager();
            sp<IBinder> binder;
            do {
                binder = sm->getService(String16("media.audio_flinger"));
                if (binder != 0)
                    break;
                ALOGW("AudioFlinger not published, waiting...");
                usleep(500000); // 0.5 s
            } while (true);
            if (gAudioFlingerClient == NULL) {
                gAudioFlingerClient = new AudioFlingerClient();
            } else {
                if (gAudioErrorCallback) {
                    gAudioErrorCallback(NO_ERROR);
                }
            }
            binder->linkToDeath(gAudioFlingerClient);
            gAudioFlinger = interface_cast<IAudioFlinger>(binder);
            LOG_ALWAYS_FATAL_IF(gAudioFlinger == 0);
            afc = gAudioFlingerClient;
        }
        af = gAudioFlinger;
    }
    if (afc != 0) {
        af->registerClient(afc);
    }
    return af;
}

This is a very typical piece of Binder-based IPC (a Java sketch of the retry-until-published step follows the list):

  • sm = defaultServiceManager() obtains the service manager
  • binder = sm->getService(String16("media.audio_flinger")) obtains the service's binder
  • gAudioFlinger = interface_cast<IAudioFlinger>(binder) wraps that binder in an IAudioFlinger proxy
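The do/while loop above keeps polling until the service is published. A minimal, hedged Java analog of that pattern (the generic lookup is an invented stand-in; there is no public Java API for media.audio_flinger):

import java.util.function.Supplier;

public final class ServiceWaiter {
    // Keep polling until the lookup succeeds, sleeping 0.5 s between tries,
    // mirroring the usleep(500000) retry loop in get_audio_flinger().
    public static <T> T waitForService(Supplier<T> lookup) throws InterruptedException {
        T service;
        while ((service = lookup.get()) == null) {
            Thread.sleep(500);
        }
        return service;
    }
}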

Next, let's follow IAF.openRecord.

5. IAF.openRecord (C++)

Path: frameworks/av/media/libmedia/IAudioFlinger.cpp

virtual sp<IAudioRecord> openRecord(
                                audio_io_handle_t input,
                                uint32_t sampleRate,
                                audio_format_t format,
                                audio_channel_mask_t channelMask,
                                size_t *pFrameCount,
                                track_flags_t *flags,
                                pid_t tid,
                                int *sessionId,
                                size_t *notificationFrames,
                                sp<IMemory>& cblk,
                                sp<IMemory>& buffers,
                                status_t *status)
    {
        Parcel data, reply;
        sp<IAudioRecord> record;
        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
        ... write the remaining arguments into data
        data.writeInt64(notificationFrames != NULL ? *notificationFrames : 0);
        cblk.clear();
        buffers.clear();
        status_t lStatus = remote()->transact(OPEN_RECORD, data, &reply);
        if (lStatus != NO_ERROR) {
            ALOGE("openRecord error: %s", strerror(-lStatus));
        } else {
           ...
            record = interface_cast<IAudioRecord>(reply.readStrongBinder());
            cblk = interface_cast<IMemory>(reply.readStrongBinder());
           ...
        }
        
        return record;
    }

Key points (a Java-side sketch of the same proxy pattern follows the list):

  • the arguments are written into the data Parcel
  • remote()->transact performs the IPC call into the remote service
  • record = interface_cast<IAudioRecord>(...) wraps the binder returned in the reply
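For comparison, the Java side exposes the same write/transact/read pattern through android.os.Parcel. A hedged sketch (the transaction code and interface descriptor are illustrative; IAudioFlinger has no Java-side Stub in AOSP):

import android.os.IBinder;
import android.os.Parcel;
import android.os.RemoteException;

public final class ProxySketch {
    // Mirrors what the native openRecord() proxy does: marshal arguments,
    // transact, then unmarshal a binder from the reply.
    static IBinder callRemote(IBinder remote, int transactionCode) throws RemoteException {
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        try {
            data.writeInterfaceToken("android.media.IAudioFlinger"); // illustrative descriptor
            // ... write arguments into data, as the native proxy does ...
            remote.transact(transactionCode, data, reply, 0); // remote()->transact(OPEN_RECORD, ...)
            return reply.readStrongBinder(); // interface_cast<IAudioRecord>(reply.readStrongBinder())
        } finally {
            reply.recycle();
            data.recycle();
        }
    }
}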

6. AF.openRecord (C++)

Path: frameworks/av/services/audioflinger/AudioFlinger.cpp

sp<IAudioRecord> AudioFlinger::openRecord(
        audio_io_handle_t input,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t *frameCount,
        IAudioFlinger::track_flags_t *flags,
        pid_t tid,
        int *sessionId,
        size_t *notificationFrames,
        sp<IMemory>& cblk,
        sp<IMemory>& buffers,
        status_t *status)
{
    sp<RecordThread::RecordTrack> recordTrack;
    sp<RecordHandle> recordHandle;
    sp<Client> client;
    status_t lStatus;
    int lSessionId;

    cblk.clear();
    buffers.clear();

   ...

    {
        Mutex::Autolock _l(mLock);
        RecordThread *thread = checkRecordThread_l(input);
        if (thread == NULL) {
            ALOGE("openRecord() checkRecordThread_l failed");
            lStatus = BAD_VALUE;
            goto Exit;
        }

        pid_t pid = IPCThreadState::self()->getCallingPid();
       ...
        // TODO: the uid should be passed in as a parameter to openRecord
        recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
                                                  frameCount, lSessionId, notificationFrames,
                                                  IPCThreadState::self()->getCallingUid(),
                                                  flags, tid, &lStatus);
        LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));

       
    }

   ...
    cblk = recordTrack->getCblk();
    buffers = recordTrack->getBuffers();

    // return handle to client
    recordHandle = new RecordHandle(recordTrack);

Exit:
    *status = lStatus;
    return recordHandle;
}

Key points:

  • RecordThread *thread = checkRecordThread_l(input)
  • recordTrack = thread->createRecordTrack_l(…)
  • recordHandle = new RecordHandle(recordTrack)

So the returned RecordHandle wraps a RecordTrack, which in turn is tied to a RecordThread.

7. Flow recap

  • Java new AudioRecord –> android_media_AudioRecord_setup –> AR.set –> AR.openRecord_l
  • AR.openRecord_l calls AS.get_audio_flinger to obtain AudioFlinger's binder
  • AR.openRecord_l then obtains an sp<IAudioRecord> via AF.openRecord, wrapping RecordHandle –> RecordTrack –> RecordThread
  • that sp<IAudioRecord> is assigned to mAudioRecord

IV. startRecording

That covers most of the source behind creating an AudioRecord object. Now let's look at the startRecording path.

1. AR.startRecording (Java)

  public void startRecording()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("startRecording() called on an "
                    + "uninitialized AudioRecord.");
        }

        // start recording
        synchronized(mRecordingStateLock) {
            if (native_start(MediaSyncEvent.SYNC_EVENT_NONE, 0) == SUCCESS) {
                handleFullVolumeRec(true);
                mRecordingState = RECORDSTATE_RECORDING;
            }
        }
    }

Nothing special here; it calls straight into the native layer. Note that mRecordingState is only set to RECORDSTATE_RECORDING when native_start returns SUCCESS, which callers can verify, as sketched below.
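A small, hedged sketch of such a check (startOrFail is an invented helper name):

static void startOrFail(AudioRecord recorder) {
    recorder.startRecording();
    // startRecording() does not throw on native failure; the recording
    // state only changes when native_start returns SUCCESS.
    if (recorder.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
        recorder.release();
        throw new IllegalStateException("AudioRecord failed to start");
    }
}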

2. AR.startRecording (C++)

Path: frameworks/base/core/jni/android_media_AudioRecord.cpp

android_media_AudioRecord_start(JNIEnv *env, jobject thiz, jint event, jint triggerSession)
{
    sp<AudioRecord> lpRecorder = getAudioRecord(env, thiz);
    if (lpRecorder == NULL ) {
        jniThrowException(env, "java/lang/IllegalStateException", NULL);
        return (jint) AUDIO_JAVA_ERROR;
    }

    return nativeToJavaStatus(
            lpRecorder->start((AudioSystem::sync_event_t)event, triggerSession));
}

3. AR.start (C++)

Path: frameworks/av/media/libmedia/AudioRecord.cpp

status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
{
    ALOGV("start, sync event %d trigger session %d", event, triggerSession);

    AutoMutex lock(mLock);
    if (mActive) {
        return NO_ERROR;
    }

    // reset current position as seen by client to 0
    mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
    // force refresh of remaining frames by processAudioBuffer() as last
    // read before stop could be partial.
    mRefreshRemaining = true;

    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        ALOGV("mAudioRecord->start()");
        status = mAudioRecord->start(event, triggerSession);
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreRecord_l("start");
    }

    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
    } else {
        mActive = true;
        sp<AudioRecordThread> t = mAudioRecordThread;
        if (t != 0) {
            t->resume();
        } else {
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }
    }

    return status;
}

The key line here is status = mAudioRecord->start(event, triggerSession);
the mAudioRecord variable was covered in the previous section.

So: mAudioRecord.start –> RecordHandle.start –> RecordTrack.start –> RecordThread.start

4. RT.start (C++)

Path: frameworks/av/services/audioflinger/Threads.cpp

status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack,
                                           AudioSystem::sync_event_t event,
                                           int triggerSession)
{
    ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession);
    sp<ThreadBase> strongMe = this;
    status_t status = NO_ERROR;

    if (event == AudioSystem::SYNC_EVENT_NONE) {
        recordTrack->clearSyncStartEvent();
    } else if (event != AudioSystem::SYNC_EVENT_SAME) {
        recordTrack->mSyncStartEvent = mAudioFlinger->createSyncEvent(event,
                                       triggerSession,
                                       recordTrack->sessionId(),
                                       syncStartEventCallback,
                                       recordTrack);
        // Sync event can be cancelled by the trigger session if the track is not in a
        // compatible state in which case we start record immediately
        if (recordTrack->mSyncStartEvent->isCancelled()) {
            recordTrack->clearSyncStartEvent();
        } else {
            // do not wait for the event for more than AudioSystem::kSyncRecordStartTimeOutMs
            recordTrack->mFramesToDrop = -
                    ((AudioSystem::kSyncRecordStartTimeOutMs * recordTrack->mSampleRate) / 1000);
        }
    }

    {
        // This section is a rendezvous between binder thread executing start() and RecordThread
        AutoMutex lock(mLock);
        if (mActiveTracks.indexOf(recordTrack) >= 0) {
            if (recordTrack->mState == TrackBase::PAUSING) {
                ALOGV("active record track PAUSING -> ACTIVE");
                recordTrack->mState = TrackBase::ACTIVE;
            } else {
                ALOGV("active record track state %d", recordTrack->mState);
            }
            return status;
        }

        // TODO consider other ways of handling this, such as changing the state to :STARTING and
        //      adding the track to mActiveTracks after returning from AudioSystem::startInput(),
        //      or using a separate command thread
        recordTrack->mState = TrackBase::STARTING_1;
        mActiveTracks.add(recordTrack);
        mActiveTracksGen++;
        status_t status = NO_ERROR;
        if (recordTrack->isExternalTrack()) {
            mLock.unlock();
            status = AudioSystem::startInput(mId, (audio_session_t)recordTrack->sessionId());
            mLock.lock();
            // FIXME should verify that recordTrack is still in mActiveTracks
            if (status != NO_ERROR) {
                mActiveTracks.remove(recordTrack);
                mActiveTracksGen++;
                recordTrack->clearSyncStartEvent();
                ALOGV("RecordThread::start error %d", status);
                return status;
            }
        }
        // Catch up with current buffer indices if thread is already running.
        // This is what makes a new client discard all buffered data.  If the track's mRsmpInFront
        // was initialized to some value closer to the thread's mRsmpInFront, then the track could
        // see previously buffered data before it called start(), but with greater risk of overrun.

        recordTrack->mRsmpInFront = mRsmpInRear;
        recordTrack->mRsmpInUnrel = 0;
        // FIXME why reset?
        if (recordTrack->mResampler != NULL) {
            recordTrack->mResampler->reset();
        }
        recordTrack->mState = TrackBase::STARTING_2;
        // signal thread to start
        mWaitWorkCV.broadcast();
        if (mActiveTracks.indexOf(recordTrack) < 0) {
            ALOGV("Record failed to start");
            status = BAD_VALUE;
            goto startError;
        }
        return status;
    }

startError:
    if (recordTrack->isExternalTrack()) {
        AudioSystem::stopInput(mId, (audio_session_t)recordTrack->sessionId());
    }
    recordTrack->clearSyncStartEvent();
    // FIXME I wonder why we do not reset the state here?
    return status;
}

Let's keep following AudioSystem::startInput.

5. AS.startInput (C++)

Path: frameworks/av/media/libmedia/AudioSystem.cpp

status_t AudioSystem::startInput(audio_io_handle_t input,
                                 audio_session_t session)
{
    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
    if (aps == 0) return PERMISSION_DENIED;
    return aps->startInput(input, session);
}

AudioSystem::get_audio_policy_service() works much like the code in section III.4 above: it obtains an IAudioPolicyService object that wraps the service's binder.

IAudioPolicyService.startInput goes through binder into AudioPolicyService.startInput, so let's look at AudioPolicyService.startInput next.

6. APS.startInput (C++)

startInput is not found in AudioPolicyService.cpp itself; its implementation lives in AudioPolicyInterfaceImpl.cpp in the same directory.

Path: frameworks/av/services/audiopolicy/AudioPolicyInterfaceImpl.cpp

status_t AudioPolicyService::startInput(audio_io_handle_t input,
                                        audio_session_t session)
{
    if (mAudioPolicyManager == NULL) {
        return NO_INIT;
    }
    Mutex::Autolock _l(mLock);

    return mAudioPolicyManager->startInput(input, session);
}

The type of mAudioPolicyManager is declared in AudioPolicyService.h:

AudioPolicyInterface *mAudioPolicyManager;

AudioPolicyInterface is in practice implemented separately by each platform vendor, though a default implementation exists. The role of this class is to:

  • keep track of the current system state (removable device connections, phone state, user requests, ...); system state changes and user actions are reported to the audio policy manager through AudioPolicyInterface methods
  • process the getOutput() queries received when AudioTrack objects are created
  • likewise process the getInput()/releaseInput() queries received from AudioRecord objects and configure the audio inputs

Here we will look at the default Android implementation.

P.S. This part of the source is genuinely hard to untangle, doubly so with my half-baked C++...

7. APMB.startInput (C++)

Path: hardware/libhardware_legacy/audio/AudioPolicyManagerBase.cpp

status_t AudioPolicyManagerBase::startInput(audio_io_handle_t input)
{
    ALOGV("startInput() input %d", input);
    ssize_t index = mInputs.indexOfKey(input);
    if (index < 0) {
        ALOGW("startInput() unknown input %d", input);
        return BAD_VALUE;
    }
    AudioInputDescriptor *inputDesc = mInputs.valueAt(index);

#ifdef AUDIO_POLICY_TEST
    if (mTestInput == 0)
#endif //AUDIO_POLICY_TEST
    {
        // refuse 2 active AudioRecord clients at the same time except if the active input
        // uses AUDIO_SOURCE_HOTWORD in which case it is closed.
        audio_io_handle_t activeInput = getActiveInput();
        if (!isVirtualInputDevice(inputDesc->mDevice) && activeInput != 0) {
            AudioInputDescriptor *activeDesc = mInputs.valueFor(activeInput);
            if (activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD) {
                ALOGW("startInput() preempting already started low-priority input %d", activeInput);
                stopInput(activeInput);
                releaseInput(activeInput);
            } else {
                ALOGW("startInput() input %d failed: other input already started", input);
                return INVALID_OPERATION;
            }
        }
    }

    audio_devices_t newDevice = getDeviceForInputSource(inputDesc->mInputSource);
    if ((newDevice != AUDIO_DEVICE_NONE) && (newDevice != inputDesc->mDevice)) {
        inputDesc->mDevice = newDevice;
    }

    // automatically enable the remote submix output when input is started
    if (audio_is_remote_submix_device(inputDesc->mDevice)) {
        setDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
                AudioSystem::DEVICE_STATE_AVAILABLE, AUDIO_REMOTE_SUBMIX_DEVICE_ADDRESS);
    }

    AudioParameter param = AudioParameter();
    param.addInt(String8(AudioParameter::keyRouting), (int)inputDesc->mDevice);

    int aliasSource = (inputDesc->mInputSource == AUDIO_SOURCE_HOTWORD) ?
                                        AUDIO_SOURCE_VOICE_RECOGNITION : inputDesc->mInputSource;

    param.addInt(String8(AudioParameter::keyInputSource), aliasSource);
    ALOGV("AudioPolicyManager::startInput() input source = %d", inputDesc->mInputSource);

    mpClientInterface->setParameters(input, param.toString());

    inputDesc->mRefCount = 1;
    return NO_ERROR;
}

At the very end it calls mpClientInterface->setParameters(input, param.toString());

mpClientInterface is an AudioPolicyClientInterface.

So let's keep following APC.setParameters.
Path: frameworks/av/services/audiopolicy/AudioPolicyClientImpl.cpp

void AudioPolicyService::AudioPolicyClient::setParameters(audio_io_handle_t io_handle,
                   const String8& keyValuePairs,
                   int delay_ms)
{
    mAudioPolicyService->setParameters(io_handle, keyValuePairs.string(), delay_ms);
}

Next, APS.setParameters.

8. APS.setParameters (C++)

Path: frameworks/av/services/audiopolicy/AudioPolicyService.cpp

void AudioPolicyService::setParameters(audio_io_handle_t ioHandle,
                                       const char *keyValuePairs,
                                       int delayMs)
{
    mAudioCommandThread->parametersCommand(ioHandle, keyValuePairs,
                                           delayMs);
}

Following mAudioCommandThread->parametersCommand, still in the same file:

status_t AudioPolicyService::AudioCommandThread::parametersCommand(audio_io_handle_t ioHandle,
                                                                   const char *keyValuePairs,
                                                                   int delayMs)
{
    sp<AudioCommand> command = new AudioCommand();
    command->mCommand = SET_PARAMETERS;
    sp<ParametersData> data = new ParametersData();
    data->mIO = ioHandle;
    data->mKeyValuePairs = String8(keyValuePairs);
    command->mParam = data;
    command->mWaitStatus = true;
    ALOGV("AudioCommandThread() adding set parameter string %s, io %d ,delay %d",
            keyValuePairs, ioHandle, delayMs);
    return sendCommand(command, delayMs);
}

As you can see, this does not start a new thread; it wraps the SET_PARAMETERS opcode and its data in an AudioCommand and hands it to the already-running command thread via sendCommand.
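The mechanism is a classic command queue drained by a single worker thread. A minimal, hedged Java analog (all names here are invented for illustration; they are not AOSP classes; requires Java 16+ for records):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public final class CommandThreadSketch {
    record Command(int opcode, String keyValuePairs) {}
    static final int SET_PARAMETERS = 2; // illustrative opcode

    private final BlockingQueue<Command> queue = new LinkedBlockingQueue<>();
    private final Thread worker = new Thread(() -> {
        try {
            while (true) {
                Command c = queue.take(); // sleep until a command arrives
                if (c.opcode() == SET_PARAMETERS) {
                    // in AOSP this is where AudioSystem::setParameters runs
                    System.out.println("setParameters: " + c.keyValuePairs());
                }
            }
        } catch (InterruptedException ignored) { }
    });

    public CommandThreadSketch() { worker.start(); }

    // analog of AudioCommandThread::parametersCommand: producers just enqueue
    public void parametersCommand(String keyValuePairs) {
        queue.offer(new Command(SET_PARAMETERS, keyValuePairs));
    }
}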

Let's follow sendCommand, still in the same file:

status_t AudioPolicyService::AudioCommandThread::sendCommand(sp<AudioCommand>& command, int delayMs)
{
    {
        Mutex::Autolock _l(mLock);
        insertCommand_l(command, delayMs);
        mWaitWorkCV.signal();
    }
    Mutex::Autolock _l(command->mLock);
    while (command->mWaitStatus) {
        nsecs_t timeOutNs = kAudioCommandTimeoutNs + milliseconds(delayMs);
        if (command->mCond.waitRelative(command->mLock, timeOutNs) != NO_ERROR) {
            command->mStatus = TIMED_OUT;
            command->mWaitStatus = false;
        }
    }
    return command->mStatus;
}

Here the queue lock is taken, insertCommand_l() inserts the command, and the caller then waits (with a timeout) for the command's status.
Let's keep going.

// insertCommand_l() must be called with mLock held
void AudioPolicyService::AudioCommandThread::insertCommand_l(sp<AudioCommand>& command, int delayMs)
{
    ssize_t i;  // not size_t because i will count down to -1
    Vector < sp<AudioCommand> > removedCommands;
    command->mTime = systemTime() + milliseconds(delayMs);

    // acquire wake lock to make sure delayed commands are processed
    if (mAudioCommands.isEmpty()) {
        acquire_wake_lock(PARTIAL_WAKE_LOCK, mName.string());
    }

    // check same pending commands with later time stamps and eliminate them
    for (i = mAudioCommands.size()-1; i >= 0; i--) {
        sp<AudioCommand> command2 = mAudioCommands[i];
        // commands are sorted by increasing time stamp: no need to scan the rest of mAudioCommands
        if (command2->mTime <= command->mTime) break;

        // create audio patch or release audio patch commands are equivalent
        // with regard to filtering
        if ((command->mCommand == CREATE_AUDIO_PATCH) ||
                (command->mCommand == RELEASE_AUDIO_PATCH)) {
            if ((command2->mCommand != CREATE_AUDIO_PATCH) &&
                    (command2->mCommand != RELEASE_AUDIO_PATCH)) {
                continue;
            }
        } else if (command2->mCommand != command->mCommand) continue;

        switch (command->mCommand) {
        case SET_PARAMETERS: {
            ParametersData *data = (ParametersData *)command->mParam.get();
            ParametersData *data2 = (ParametersData *)command2->mParam.get();
            if (data->mIO != data2->mIO) break;
            ALOGV("Comparing parameter command %s to new command %s",
                    data2->mKeyValuePairs.string(), data->mKeyValuePairs.string());
            AudioParameter param = AudioParameter(data->mKeyValuePairs);
            AudioParameter param2 = AudioParameter(data2->mKeyValuePairs);
            for (size_t j = 0; j < param.size(); j++) {
                String8 key;
                String8 value;
                param.getAt(j, key, value);
                for (size_t k = 0; k < param2.size(); k++) {
                    String8 key2;
                    String8 value2;
                    param2.getAt(k, key2, value2);
                    if (key2 == key) {
                        param2.remove(key2);
                        ALOGV("Filtering out parameter %s", key2.string());
                        break;
                    }
                }
            }
            // if all keys have been filtered out, remove the command.
            // otherwise, update the key value pairs
            if (param2.size() == 0) {
                removedCommands.add(command2);
            } else {
                data2->mKeyValuePairs = param2.toString();
            }
            command->mTime = command2->mTime;
            // force delayMs to non 0 so that code below does not request to wait for
            // command status as the command is now delayed
            delayMs = 1;
        } break;

        ... other commands ...
    }

    // remove filtered commands
    for (size_t j = 0; j < removedCommands.size(); j++) {
        // removed commands always have time stamps greater than current command
        for (size_t k = i + 1; k < mAudioCommands.size(); k++) {
            if (mAudioCommands[k].get() == removedCommands[j].get()) {
                ALOGV("suppressing command: %d", mAudioCommands[k]->mCommand);
                mAudioCommands.removeAt(k);
                break;
            }
        }
    }
    removedCommands.clear();

    // Disable wait for status if delay is not 0.
    // Except for create audio patch command because the returned patch handle
    // is needed by audio policy manager
    if (delayMs != 0 && command->mCommand != CREATE_AUDIO_PATCH) {
        command->mWaitStatus = false;
    }

    // insert command at the right place according to its time stamp
    ALOGV("inserting command: %d at index %zd, num commands %zu",
            command->mCommand, i+1, mAudioCommands.size());
    mAudioCommands.insertAt(command, i + 1);
}

This function inserts the SET_PARAMETERS command into the mAudioCommands queue, merging it with any equivalent pending command. Next, let's see where the command actually gets executed.

That happens in the threadLoop function:

bool AudioPolicyService::AudioCommandThread::threadLoop()
{
    nsecs_t waitTime = INT64_MAX;

    mLock.lock();
    while (!exitPending())
    {
        sp<AudioPolicyService> svc;
        while (!mAudioCommands.isEmpty() && !exitPending()) {
            nsecs_t curTime = systemTime();
            // commands are sorted by increasing time stamp: execute them from index 0 and up
            if (mAudioCommands[0]->mTime <= curTime) {
                sp<AudioCommand> command = mAudioCommands[0];
                mAudioCommands.removeAt(0);
                mLastCommand = command;

                switch (command->mCommand) {
              ... other commands ...
                case SET_PARAMETERS: {
                    ParametersData *data = (ParametersData *)command->mParam.get();
                    ALOGV("AudioCommandThread() processing set parameters string %s, io %d",
                            data->mKeyValuePairs.string(), data->mIO);
                    command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
                    }break;
             ... other commands ...
        }
        // release mLock before releasing strong reference on the service as
        // AudioPolicyService destructor calls AudioCommandThread::exit() which acquires mLock.
        mLock.unlock();
        svc.clear();
        mLock.lock();
        if (!exitPending() && mAudioCommands.isEmpty()) {
            // release delayed commands wake lock
            release_wake_lock(mName.string());
            ALOGV("AudioCommandThread() going to sleep");
            mWaitWorkCV.waitRelative(mLock, waitTime);
            ALOGV("AudioCommandThread() waking up");
        }
    }
    // release delayed commands wake lock before quitting
    if (!mAudioCommands.isEmpty()) {
        release_wake_lock(mName.string());
    }
    mLock.unlock();
    return false;
}

Here we can see the call to AudioSystem::setParameters; let's keep following it.

9. AS.setParameters (C++)

Path: frameworks/av/media/libmedia/AudioSystem.cpp

status_t AudioSystem::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
{
    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
    if (af == 0) return PERMISSION_DENIED;
    return af->setParameters(ioHandle, keyValuePairs);
}

The line af = AudioSystem::get_audio_flinger(); should look familiar by now: it obtains the IAudioFlinger proxy, which wraps the service's binder.

10. AF.setParameters (C++)

Path: frameworks/av/services/audioflinger/AudioFlinger.cpp

status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
{
    ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d",
            ioHandle, keyValuePairs.string(), IPCThreadState::self()->getCallingPid());

    // check calling permissions
    if (!settingsAllowed()) {
        return PERMISSION_DENIED;
    }

    // AUDIO_IO_HANDLE_NONE means the parameters are global to the audio hardware interface
    if (ioHandle == AUDIO_IO_HANDLE_NONE) {
        Mutex::Autolock _l(mLock);
        status_t final_result = NO_ERROR;
        {
            AutoMutex lock(mHardwareLock);
            mHardwareStatus = AUDIO_HW_SET_PARAMETER;
            for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
                audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
                status_t result = dev->set_parameters(dev, keyValuePairs.string());
                final_result = result ?: final_result;
            }
            mHardwareStatus = AUDIO_HW_IDLE;
        }
        // disable AEC and NS if the device is a BT SCO headset supporting those pre processings
        AudioParameter param = AudioParameter(keyValuePairs);
        String8 value;
        if (param.get(String8(AUDIO_PARAMETER_KEY_BT_NREC), value) == NO_ERROR) {
            bool btNrecIsOff = (value == AUDIO_PARAMETER_VALUE_OFF);
            if (mBtNrecIsOff != btNrecIsOff) {
                for (size_t i = 0; i < mRecordThreads.size(); i++) {
                    sp<RecordThread> thread = mRecordThreads.valueAt(i);
                    audio_devices_t device = thread->inDevice();
                    bool suspend = audio_is_bluetooth_sco_device(device) && btNrecIsOff;
                    // collect all of the thread's session IDs
                    KeyedVector<int, bool> ids = thread->sessionIds();
                    // suspend effects associated with those session IDs
                    for (size_t j = 0; j < ids.size(); ++j) {
                        int sessionId = ids.keyAt(j);
                        thread->setEffectSuspended(FX_IID_AEC,
                                                   suspend,
                                                   sessionId);
                        thread->setEffectSuspended(FX_IID_NS,
                                                   suspend,
                                                   sessionId);
                    }
                }
                mBtNrecIsOff = btNrecIsOff;
            }
        }
        String8 screenState;
        if (param.get(String8(AudioParameter::keyScreenState), screenState) == NO_ERROR) {
            bool isOff = screenState == "off";
            if (isOff != (AudioFlinger::mScreenState & 1)) {
                AudioFlinger::mScreenState = ((AudioFlinger::mScreenState & ~1) + 2) | isOff;
            }
        }
        return final_result;
    }

    // hold a strong ref on thread in case closeOutput() or closeInput() is called
    // and the thread is exited once the lock is released
    sp<ThreadBase> thread;
    {
        Mutex::Autolock _l(mLock);
        thread = checkPlaybackThread_l(ioHandle);
        if (thread == 0) {
            thread = checkRecordThread_l(ioHandle);
        } else if (thread == primaryPlaybackThread_l()) {
            // indicate output device change to all input threads for pre processing
            AudioParameter param = AudioParameter(keyValuePairs);
            int value;
            if ((param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) &&
                    (value != 0)) {
                for (size_t i = 0; i < mRecordThreads.size(); i++) {
                    mRecordThreads.valueAt(i)->setParameters(keyValuePairs);
                }
            }
        }
    }
    if (thread != 0) {
        return thread->setParameters(keyValuePairs);
    }
    return BAD_VALUE;
}

Among the parameters that reach AF.setParameters(), AudioParameter::keyRouting carries the input device selected by the audio policy manager and AudioParameter::keyInputSource carries the selected input source.

The command is sent through the queue and handled by another thread, which finally hands the parameters to the audio hardware in the hardware abstraction layer (HAL), i.e. a subclass of the AudioHardwareInterface interface implemented by the platform vendor.
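Concretely, what the HAL's set_parameters() receives is a semicolon-separated key/value string. A hedged illustration (the key names "routing" and "input_source" correspond to AudioParameter::keyRouting and keyInputSource; the values are made up):

public class HalParamsSketch {
    public static void main(String[] args) {
        int device = 0x80000004; // illustrative input device bitmask
        int source = 1;          // illustrative input source
        // AudioParameter::toString() joins "key=value" pairs with ';'
        String kvPairs = "routing=" + device + ";input_source=" + source;
        System.out.println(kvPairs); // what dev->set_parameters() would see
    }
}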

11. Flow recap

AR.startRecording (Java)
–>android_media_AudioRecord_start (JNI)
–>AR.start
–>RecordHandle::start
–>RecordTrack::start
–>RecordThread::start

–>AudioSystem::startInput
–>AudioPolicyService::startInput
–>AudioPolicyManagerBase::startInput (or the vendor's own policy manager)
–>AudioPolicyService::setParameters
–>asynchronous hand-off (a command is pushed onto a queue and handled by another thread)
–>AudioFlinger::setParameters

–>the HAL audio hardware (a subclass of AudioHardwareInterface, implemented by the platform vendor)

AudioRecord runs in the application process, while AudioFlinger and AudioPolicyService run in the mediaserver process. The application's call reaches AudioFlinger, AudioPolicyService determines the input device, and that device is set into the audio hardware, which performs the switch.

Once the switch above is complete,
AudioRecord::start on the application side lets its callback thread get to work (ClientRecordThread in older releases, AudioRecordThread in 5.1), or, when there is no callback thread, raises the calling thread's priority:

....
 androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);

In AudioFlinger, the record thread's start function (RecordThread::start) signals the thread loop that is waiting on mWaitWorkCV so that recording can begin. The sleeping and waking sides of the loop look like this:

mWaitWorkCV.wait(mLock); // wait
LOGV("RecordThread: loop starting");
continue;


LOGV("Signal record thread");
mWaitWorkCV.signal(); // wake the loop once the input device switch is done

The record thread's loop function, RecordThread::threadLoop, is fairly involved: it must handle mono versus stereo, 16-bit versus 8-bit PCM data, and optional resampling. The overall principle, though, is to read audio data from the HAL input stream AudioStreamIn into the buffer mRsmpInBuffer via its read function:

  // otherwise use the HAL / AudioStreamIn directly
ssize_t bytesRead = mInput->stream->read(mInput->stream, &mRsmpInBuffer[rear * mChannelCount], mBufferSize);

V. read

1. android_media_AudioRecord_readInByteArray

There is not much to say about the Java side:
read(byte[], ...) → native_read_in_byte_array → android_media_AudioRecord_readInByteArray.
Let's look directly at the implementation of android_media_AudioRecord_readInByteArray.
Path: frameworks/base/core/jni/android_media_AudioRecord.cpp

static jint android_media_AudioRecord_readInByteArray(JNIEnv *env,  jobject thiz,
                                                        jbyteArray javaAudioData,
                                                        jint offsetInBytes, jint sizeInBytes) {
    jbyte* recordBuff = NULL;
    // get the audio recorder from which we'll read new audio samples
    sp<AudioRecord> lpRecorder = getAudioRecord(env, thiz);
    if (lpRecorder == NULL) {
        ALOGE("Unable to retrieve AudioRecord object, can't record");
        return 0;
    }

    if (!javaAudioData) {
        ALOGE("Invalid Java array to store recorded audio, can't record");
        return 0;
    }

    // get the pointer to where we'll record the audio
    // NOTE: We may use GetPrimitiveArrayCritical() when the JNI implementation changes in such
    // a way that it becomes much more efficient. When doing so, we will have to prevent the
    // AudioSystem callback to be called while in critical section (in case of media server
    // process crash for instance)
    
    recordBuff = (jbyte *)env->GetByteArrayElements(javaAudioData, NULL);

    if (recordBuff == NULL) {
        ALOGE("Error retrieving destination for recorded audio data, can't record");
        return 0;
    }

    // read the new audio data from the native AudioRecord object
    ssize_t recorderBuffSize = lpRecorder->frameCount()*lpRecorder->frameSize();
    ssize_t readSize = lpRecorder->read(recordBuff + offsetInBytes,
                                        sizeInBytes > (jint)recorderBuffSize ?
                                            (jint)recorderBuffSize : sizeInBytes );
    env->ReleaseByteArrayElements(javaAudioData, recordBuff, 0);

    if (readSize < 0) {
        readSize = (jint)AUDIO_JAVA_INVALID_OPERATION;
    }
    return (jint) readSize;
}

2. AR.read (C++)

Path: frameworks/av/media/libmedia/AudioRecord.cpp

ssize_t AudioRecord::read(void* buffer, size_t userSize)
{
    if (mTransfer != TRANSFER_SYNC) {
        return INVALID_OPERATION;
    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // sanity-check. user is most-likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        ALOGE("AudioRecord::read(buffer=%p, size=%zu (%zu)", buffer, userSize, userSize);
        return BAD_VALUE;
    }

    ssize_t read = 0;
    Buffer audioBuffer;

    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;

        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
        if (err < 0) {
            if (read > 0) {
                break;
            }
            return ssize_t(err);
        }

        size_t bytesRead = audioBuffer.size;
        memcpy(buffer, audioBuffer.i8, bytesRead);
        buffer = ((char *) buffer) + bytesRead;
        userSize -= bytesRead;
        read += bytesRead;

        releaseBuffer(&audioBuffer);
    }

    return read;
}
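On the Java side, read() can return fewer bytes than requested and signals errors with negative return values, so capture code normally loops. A minimal, hedged sketch (pump is an invented helper):

import android.media.AudioRecord;
import java.io.IOException;
import java.io.OutputStream;

public final class ReadLoopSketch {
    // Pump PCM data from an already-started AudioRecord into a stream.
    static void pump(AudioRecord recorder, OutputStream out, byte[] buf)
            throws IOException {
        while (recorder.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
            int n = recorder.read(buf, 0, buf.length); // blocks until data arrives
            if (n < 0) {
                break; // e.g. ERROR_INVALID_OPERATION or ERROR_BAD_VALUE
            }
            out.write(buf, 0, n);
        }
    }
}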

VI. stop

stop → native_stop → android_media_AudioRecord_stop → lpRecorder->stop():

void AudioRecord::stop()
{
    AutoMutex lock(mLock);
    if (!mActive) {
        return;
    }

    mActive = false;
    mProxy->interrupt();
    mAudioRecord->stop();
    // the record head position will reset to 0, so if a marker is set, we need
    // to activate it again
    mMarkerReached = false;
    sp<AudioRecordThread> t = mAudioRecordThread;
    if (t != 0) {
        t->pause();
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

Stopping is comparatively simple. The application side makes a cross-process call into AudioFlinger's stop, which leads to AudioSystem::stopInput, then AudioPolicyService::stopInput, then the audio policy manager's stopInput (e.g. AudioPolicyManagerBase), and finally, through AudioFlinger, to the HAL audio hardware's setParameters, which sets AudioParameter::keyRouting to 0. After the input device is stopped, the application side pauses its callback thread.

As for the finer details of how the captured data is pulled and how teardown completes, I did not have time to trace further; interested readers can follow it themselves.

VII. References

Android源码分析:录音AudioRecording
AudioRecord 流程及源码分析
