Android Audio Summary 2 ---- mark

4.9 The AudioTrack Creation Process: Track and Shared Memory

Review:

a. The APP creates an AudioTrack <-----------------> the PlaybackThread in AudioFlinger creates a corresponding Track

b. The APP supplies audio data to the AudioTrack in 2 ways: all at once up front (MODE_STATIC), or incrementally while playing (MODE_STREAM)

Questions:

a. The audio data is stored in a buffer. Who provides this buffer: the APP or the PlaybackThread?

b. The APP produces the data and the PlaybackThread consumes it. How do they synchronize?

Who creates the shared memory?

a. MODE_STATIC (all data supplied up front): the APP creates the shared memory (the app knows the buffer size).

b. MODE_STREAM (data supplied while playing): the playbackThread creates the shared memory (to keep the APP simple).

How do the APP and the playbackThread synchronize the data?


a. MODE_STATIC: no synchronization needed; the APP fills the buffer first, and the playbackThread consumes it afterwards.

b. MODE_STREAM: synchronization is needed; a ring buffer is used (see the sketch below).
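
To make the ring buffer concrete before reading the framework code, below is a minimal single-producer/single-consumer sketch. It is an illustration under assumptions, not the AOSP implementation: the class RingBuffer and its methods are invented here. The APP acts as the producer, the playbackThread as the consumer, and the two free-running counters play the roles of mRear and mFront in AudioTrackShared.cpp:

#include <atomic>
#include <cstddef>

// Illustrative lock-free SPSC ring buffer (hypothetical, not AOSP code).
// The capacity must be a power of two so that a free-running counter can
// be turned into a buffer index with a simple mask.
class RingBuffer {
public:
    RingBuffer(size_t capacityPow2, char* storage)
        : mCapacity(capacityPow2), mBuffer(storage) {}

    // Producer side (the APP's write()): copy as much as fits, never block.
    size_t write(const char* src, size_t len) {
        size_t rear  = mRear.load(std::memory_order_relaxed);
        size_t front = mFront.load(std::memory_order_acquire);
        size_t space = mCapacity - (rear - front);      // free room
        if (len > space) len = space;
        for (size_t i = 0; i < len; i++)
            mBuffer[(rear + i) & (mCapacity - 1)] = src[i];
        mRear.store(rear + len, std::memory_order_release); // publish
        return len;
    }

    // Consumer side (the playbackThread): read whatever has been filled.
    size_t read(char* dst, size_t len) {
        size_t rear  = mRear.load(std::memory_order_acquire);
        size_t front = mFront.load(std::memory_order_relaxed);
        size_t filled = rear - front;                   // data available
        if (len > filled) len = filled;
        for (size_t i = 0; i < len; i++)
            dst[i] = mBuffer[(front + i) & (mCapacity - 1)];
        mFront.store(front + len, std::memory_order_release); // free room
        return len;
    }

private:
    const size_t mCapacity;        // power of two
    char* const  mBuffer;
    std::atomic<size_t> mRear{0};  // total bytes ever written (W)
    std::atomic<size_t> mFront{0}; // total bytes ever read    (R)
};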

Test programs:

Shared_mem_test.cpp (z:\android-5.0.2\frameworks\base\media\tests\audiotests)  

int AudioTrackTest::Test01() {

    sp<MemoryDealer> heap;

    sp<IMemory> iMem;

    uint8_t* p;

    short smpBuf[BUF_SZ];

    long rate = 44100;

    unsigned long phi;

    unsigned long dPhi;

    long amplitude;

    long freq = 1237;

    float f0;

    f0 = pow(2., 32.) * freq / (float)rate;

    dPhi = (unsigned long)f0;

    amplitude = 1000;

    phi = 0;

    Generate(smpBuf, BUF_SZ, amplitude, phi, dPhi);  // fill buffer

    for (int i = 0; i < 1024; i++) {

        // allocate the shared memory up front

        heap = new MemoryDealer(1024*1024, "AudioTrack Heap Base");

        iMem = heap->allocate(BUF_SZ*sizeof(short));

        p = static_cast<uint8_t*>(iMem->pointer());

        memcpy(p, smpBuf, BUF_SZ*sizeof(short));

        sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC,// stream type

               rate,

               AUDIO_FORMAT_PCM_16_BIT,// word length, PCM

               AUDIO_CHANNEL_OUT_MONO,

               iMem);

        status_t status = track->initCheck();

        if(status != NO_ERROR) {

            track.clear();

            ALOGD("Failed for initCheck()");

            return -1;

        }

        // start play

        ALOGD("start");

        track->start();

        usleep(20000);

        ALOGD("stop");

        track->stop();

        iMem.clear();

        heap.clear();

        usleep(20000);

    }

    return 0;

}

MediaAudioTrackTest.java (z:\android-5.0.2\frameworks\base\media\tests\mediaframeworktest\src\com\android\mediaframeworktest\functional\audio)   

    //Test case 4: setPlaybackHeadPosition() beyond what has been written

    @LargeTest

    public void testSetPlaybackHeadPositionTooFar() throws Exception {

        // constants for test

        final String TEST_NAME = "testSetPlaybackHeadPositionTooFar";

        final int TEST_SR = 22050;

        final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;

        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;

        final int TEST_MODE = AudioTrack.MODE_STREAM;

        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        //-------- initialization --------------

        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);

        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,

                2*minBuffSize, TEST_MODE);

        byte data[] = new byte[minBuffSize];

        // make up a frame index that's beyond what has been written: go from buffer size to frame

        //   count (given the audio track properties), and add 77.

        int frameIndexTooFar = (2*minBuffSize/2) + 77;

        //--------    test        --------------

        assumeTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);

        track.write(data, 0, data.length);

        track.write(data, 0, data.length);

        track.play();

        track.stop();

        assumeTrue(TEST_NAME, track.getPlayState() == AudioTrack.PLAYSTATE_STOPPED);

        assertTrue(TEST_NAME, track.setPlaybackHeadPosition(frameIndexTooFar) == AudioTrack.ERROR_BAD_VALUE);

        //-------- tear down      --------------

        track.release();

    }

    /**

     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.

     * @param attributes a non-null {@link AudioAttributes} instance.

     * @param format a non-null {@link AudioFormat} instance describing the format of the data

     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for

     *     configuring the audio format parameters such as encoding, channel mask and sample rate.

     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read

     *   from for playback. If using the AudioTrack in streaming mode, you can write data into

     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,

     *   this is the maximum size of the sound that will be played for this instance.

     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size

     *   for the successful creation of an AudioTrack instance in streaming mode. Using values

     *   smaller than getMinBufferSize() will result in an initialization failure.

     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.

     * @param sessionId ID of audio session the AudioTrack must be attached to, or

     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction

     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before

     *   construction.

     * @throws IllegalArgumentException

     */

    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,

            int mode, int sessionId)

                    throws IllegalArgumentException {

        // mState already == STATE_UNINITIALIZED

        if (attributes == null) {

            throw new IllegalArgumentException("Illegal null AudioAttributes");

        }

        if (format == null) {

            throw new IllegalArgumentException("Illegal null AudioFormat");

        }

        // remember which looper is associated with the AudioTrack instantiation

        Looper looper;

        if ((looper = Looper.myLooper()) == null) {

            looper = Looper.getMainLooper();

        }

        int rate = 0;

        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0)

        {

            rate = format.getSampleRate();

        } else {

            rate = AudioSystem.getPrimaryOutputSamplingRate();

            if (rate <= 0) {

                rate = 44100;

            }

        }

        int channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;

        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0)

        {

            channelMask = format.getChannelMask();

        }

        int encoding = AudioFormat.ENCODING_DEFAULT;

        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {

            encoding = format.getEncoding();

        }

        audioParamCheck(rate, channelMask, encoding, mode);

        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;

        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);

        mAppOps = IAppOpsService.Stub.asInterface(b);

        mAttributes = (new AudioAttributes.Builder(attributes).build());

        if (sessionId < 0) {

            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);

        }

        int[] session = new int[1];

        session[0] = sessionId;

        // native initialization

        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,

                mSampleRate, mChannels, mAudioFormat,

                mNativeBufferSizeInBytes, mDataLoadMode, session);

        if (initResult != SUCCESS) {

            loge("Error code "+initResult+" when initializing AudioTrack.");

            return; // with mState == STATE_UNINITIALIZED

        }

        mSessionId = session[0];

        if (mDataLoadMode == MODE_STATIC) {

            mState = STATE_NO_STATIC_DATA;

        } else {

            mState = STATE_INITIALIZED;

        }

    }

android_media_AudioTrack.cpp (z:\android-5.0.2\frameworks\base\core\jni)   

// ---------------------------------------------------------------------------

static JNINativeMethod gMethods[] = {

    // name,              signature,     funcPtr

    {"native_start",         "()V",      (void *)android_media_AudioTrack_start},

    {"native_stop",          "()V",      (void *)android_media_AudioTrack_stop},

    {"native_pause",         "()V",      (void *)android_media_AudioTrack_pause},

    {"native_flush",         "()V",      (void *)android_media_AudioTrack_flush},

    {"native_setup",     "(Ljava/lang/Object;Ljava/lang/Object;IIIII[I)I",

                                         (void *)android_media_AudioTrack_setup},

    {"native_finalize",      "()V",      (void *)android_media_AudioTrack_finalize},

    {"native_release",       "()V",      (void *)android_media_AudioTrack_release},

    {"native_write_byte",    "([BIIIZ)I",(void *)android_media_AudioTrack_write_byte},

    {"native_write_native_bytes",

                             "(Ljava/lang/Object;IIIZ)I",

                                         (void *)android_media_AudioTrack_write_native_bytes},

    {"native_write_short",   "([SIII)I", (void *)android_media_AudioTrack_write_short},

    {"native_write_float",   "([FIIIZ)I",(void *)android_media_AudioTrack_write_float},

    {"native_setVolume",     "(FF)V",    (void *)android_media_AudioTrack_set_volume},

    {"native_get_native_frame_count",

                             "()I",      (void *)android_media_AudioTrack_get_native_frame_count},

    {"native_set_playback_rate",

                             "(I)I",     (void *)android_media_AudioTrack_set_playback_rate},

    {"native_get_playback_rate",

                             "()I",      (void *)android_media_AudioTrack_get_playback_rate},

    {"native_set_marker_pos","(I)I",     (void *)android_media_AudioTrack_set_marker_pos},

    {"native_get_marker_pos","()I",      (void *)android_media_AudioTrack_get_marker_pos},

    {"native_set_pos_update_period",

                             "(I)I",     (void *)android_media_AudioTrack_set_pos_update_period},

    {"native_get_pos_update_period",

                             "()I",      (void *)android_media_AudioTrack_get_pos_update_period},

    {"native_set_position",  "(I)I",     (void *)android_media_AudioTrack_set_position},

    {"native_get_position",  "()I",      (void *)android_media_AudioTrack_get_position},

    {"native_get_latency",   "()I",      (void *)android_media_AudioTrack_get_latency},

    {"native_get_timestamp", "([J)I",    (void *)android_media_AudioTrack_get_timestamp},

    {"native_set_loop",      "(III)I",   (void *)android_media_AudioTrack_set_loop},

    {"native_reload_static", "()I",      (void *)android_media_AudioTrack_reload},

    {"native_get_output_sample_rate",

                             "(I)I",      (void *)android_media_AudioTrack_get_output_sample_rate},

    {"native_get_min_buff_size",

                             "(III)I",   (void *)android_media_AudioTrack_get_min_buff_size},

    {"native_setAuxEffectSendLevel",

                             "(F)I",     (void *)android_media_AudioTrack_setAuxEffectSendLevel},

    {"native_attachAuxEffect",

                             "(I)I",     (void *)android_media_AudioTrack_attachAuxEffect},

};

android_media_AudioTrack.cpp (z:\android-5.0.2\frameworks\base\core\jni)  

// ----------------------------------------------------------------------------

static jint

android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,

        jobject jaa,

        jint sampleRateInHertz, jint javaChannelMask,

        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession) {

    ALOGV("sampleRate=%d, audioFormat(from Java)=%d, channel mask=%x, buffSize=%d",

        sampleRateInHertz, audioFormat, javaChannelMask, buffSizeInBytes);

    if (jaa == 0) {

        ALOGE("Error creating AudioTrack: invalid audio attributes");

        return (jint) AUDIO_JAVA_ERROR;

    }

    // Java channel masks don't map directly to the native definition, but it's a simple shift

    // to skip the two deprecated channel configurations "default" and "mono".

    audio_channel_mask_t nativeChannelMask = ((uint32_t)javaChannelMask) >> 2;

    if (!audio_is_output_channel(nativeChannelMask)) {

        ALOGE("Error creating AudioTrack: invalid channel mask %#x.", javaChannelMask);

        return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;

    }

    uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);

    // check the format.

    // This function was called from Java, so we compare the format against the Java constants

    audio_format_t format = audioFormatToNative(audioFormat);

    if (format == AUDIO_FORMAT_INVALID) {

        ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);

        return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;

    }

    // for the moment 8bitPCM in MODE_STATIC is not supported natively in the AudioTrack C++ class

    // so we declare everything as 16bitPCM, the 8->16bit conversion for MODE_STATIC will be handled

    // in android_media_AudioTrack_native_write_byte()

    if ((format == AUDIO_FORMAT_PCM_8_BIT)

        && (memoryMode == MODE_STATIC)) {

        ALOGV("android_media_AudioTrack_setup(): requesting MODE_STATIC for 8bit \

            buff size of %dbytes, switching to 16bit, buff size of %dbytes",

            buffSizeInBytes, 2*buffSizeInBytes);

        format = AUDIO_FORMAT_PCM_16_BIT;

        // we will need twice the memory to store the data

        buffSizeInBytes *= 2;

    }

    // compute the frame count

    size_t frameCount;

    if (audio_is_linear_pcm(format)) {

        const size_t bytesPerSample = audio_bytes_per_sample(format);

        frameCount = buffSizeInBytes / (channelCount * bytesPerSample);

    } else {

        frameCount = buffSizeInBytes;

    }

    jclass clazz = env->GetObjectClass(thiz);

    if (clazz == NULL) {

        ALOGE("Can't find %s when setting up callback.", kClassPathName);

        return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;

    }

    if (jSession == NULL) {

        ALOGE("Error creating AudioTrack: invalid session ID pointer");

        return (jint) AUDIO_JAVA_ERROR;

    }

    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);

    if (nSession == NULL) {

        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");

        return (jint) AUDIO_JAVA_ERROR;

    }

    int sessionId = nSession[0];

    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);

    nSession = NULL;

    // create the native AudioTrack object

    sp<AudioTrack> lpTrack = new AudioTrack();

    audio_attributes_t *paa = NULL;

    // read the AudioAttributes values

    paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));

    const jstring jtags =

            (jstring) env->GetObjectField(jaa, javaAudioAttrFields.fieldFormattedTags);

    const char* tags = env->GetStringUTFChars(jtags, NULL);

    // copying array size -1, char array for tags was calloc'd, no need to NULL-terminate it

    strncpy(paa->tags, tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);

    env->ReleaseStringUTFChars(jtags, tags);

    paa->usage = (audio_usage_t) env->GetIntField(jaa, javaAudioAttrFields.fieldUsage);

    paa->content_type =

            (audio_content_type_t) env->GetIntField(jaa, javaAudioAttrFields.fieldContentType);

    paa->flags = env->GetIntField(jaa, javaAudioAttrFields.fieldFlags);

    ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",

            paa->usage, paa->content_type, paa->flags, paa->tags);

    // initialize the callback information:

    // this data will be passed with every AudioTrack callback

    AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();

    lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);

    // we use a weak reference so the AudioTrack object can be garbage collected.

    lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);

    lpJniStorage->mCallbackData.busy = false;

    // initialize the native AudioTrack object

    status_t status = NO_ERROR;

    switch (memoryMode) {

    case MODE_STREAM:

        status = lpTrack->set(

                AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)

                sampleRateInHertz,

                format,// word length, PCM

                nativeChannelMask,

                frameCount,

                AUDIO_OUTPUT_FLAG_NONE,

                audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)

                0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack

                0,// shared mem

                true,// thread can call Java

                sessionId,// audio session ID

                AudioTrack::TRANSFER_SYNC,

                NULL,                         // default offloadInfo

                -1, -1,                       // default uid, pid values

                paa);

        break;

    case MODE_STATIC:

        // AudioTrack is using shared memory

        if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {

            ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");

            goto native_init_failure;

        }

        status = lpTrack->set(

                AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)

                sampleRateInHertz,

                format,// word length, PCM

                nativeChannelMask,

                frameCount,

                AUDIO_OUTPUT_FLAG_NONE,

                audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));

                0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack

                lpJniStorage->mMemBase,// shared mem

                true,// thread can call Java

                sessionId,// audio session ID

                AudioTrack::TRANSFER_SHARED,

                NULL,                         // default offloadInfo

                -1, -1,                       // default uid, pid values

                paa);

        break;

    default:

        ALOGE("Unknown mode %d", memoryMode);

        goto native_init_failure;

    }

    if (status != NO_ERROR) {

        ALOGE("Error %d initializing AudioTrack", status);

        goto native_init_failure;

    }

    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);

    if (nSession == NULL) {

        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");

        goto native_init_failure;

    }

    // read the audio session ID back from AudioTrack in case we create a new session

    nSession[0] = lpTrack->getSessionId();

    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);

    nSession = NULL;

    {   // scope for the lock

        Mutex::Autolock l(sLock);

        sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);

    }

    // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field

    // of the Java object (in mNativeTrackInJavaObj)

    setAudioTrack(env, thiz, lpTrack);

    // save the JNI resources so we can free them later

    //ALOGV("storing lpJniStorage: %x\n", (long)lpJniStorage);

    env->SetLongField(thiz, javaAudioTrackFields.jniData, (jlong)lpJniStorage);

    // since we had audio attributes, the stream type was derived from them during the

    // creation of the native AudioTrack: push the same value to the Java object

    env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());

    // audio attributes were copied in AudioTrack creation

    free(paa);

    paa = NULL;

    return (jint) AUDIO_JAVA_SUCCESS;

    // failures:

native_init_failure:

    if (paa != NULL) {

        free(paa);

    }

    if (nSession != NULL) {

        env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);

    }

    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);

    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);

    delete lpJniStorage;

    env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);

    return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;

}

4.10 Passing the Audio Data

a. The APP creates an AudioTrack, and the playbackThread creates a corresponding Track;

   audio data is passed between them through shared memory.

b. The APP can use the shared memory in 2 ways:

b.1 MODE_STATIC:

    the APP creates the shared memory and fills it with data in one shot.

b.2 MODE_STREAM:

    the APP calls obtainBuffer to get an empty region, fills it with data, then calls releaseBuffer to release it.

c. The playbackThread calls obtainBuffer to get a region containing data, consumes the data, then calls releaseBuffer to release it.

d. AudioTrack contains mProxy, which manages the shared memory and provides the obtainBuffer and releaseBuffer functions.

   Track contains mServerProxy, which likewise manages the shared memory and provides obtainBuffer and releaseBuffer.

   For different MODEs, these proxies point to different concrete classes.

e. For MODE_STREAM, the APP and the playbackThread exchange data through a ring buffer (a sketch of the obtain/release pattern follows).
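
Before the code, a hedged sketch of this symmetric obtain/release handshake. Proxy, Buffer and the function names below are placeholders standing in for the real ClientProxy/ServerProxy calls quoted later, not the actual API:

#include <cstddef>
#include <cstring>

// Placeholder types (illustrative only, not the real AudioTrackShared.h).
struct Buffer { size_t frameCount; void* raw; };
struct Proxy {
    virtual bool obtainBuffer(Buffer* buf) = 0;   // may block or fail
    virtual void releaseBuffer(Buffer* buf) = 0;
    virtual ~Proxy() {}
};

// APP side (producer): obtain an EMPTY region, fill it, release it.
void appWrite(Proxy& proxy, const char* pcm, size_t bytes, size_t frameSize) {
    while (bytes >= frameSize) {
        Buffer buf = { bytes / frameSize, NULL };
        if (!proxy.obtainBuffer(&buf) || buf.frameCount == 0)
            break;                                // no room, or error
        size_t chunk = buf.frameCount * frameSize;
        memcpy(buf.raw, pcm, chunk);              // fill with audio data
        proxy.releaseBuffer(&buf);                // publish it to the consumer
        pcm   += chunk;
        bytes -= chunk;
    }
}

// playbackThread side (consumer): obtain a FILLED region, use it, release it.
void threadMixOnce(Proxy& proxy, char* mixBuf, size_t frames, size_t frameSize) {
    Buffer buf = { frames, NULL };
    if (!proxy.obtainBuffer(&buf) || buf.frameCount == 0)
        return;                                   // underrun: no data yet
    memcpy(mixBuf, buf.raw, buf.frameCount * frameSize);  // consume the data
    proxy.releaseBuffer(&buf);                    // hand the region back
}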

frameworks\base\media\tests\MediaFrameworkTest\src\com\android\mediaframeworktest\functional\audio\MediaAudioTrackTest.java

    //Test case 5: setLoopPoints() fails for MODE_STREAM

    @LargeTest

    public void testSetLoopPointsStream() throws Exception {

        // constants for test

        final String TEST_NAME = "testSetLoopPointsStream";

        final int TEST_SR = 22050;

        final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;

        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;

        final int TEST_MODE = AudioTrack.MODE_STREAM;

        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        //-------- initialization --------------

        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);

// creating the track here causes the shared memory to be created at the native layer

        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,

                2*minBuffSize, TEST_MODE);

        byte data[] = new byte[minBuffSize];

        //--------    test        --------------

        track.write(data, 0, data.length);

        assumeTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);

        assertTrue(TEST_NAME, track.setLoopPoints(2, 50, 2) == AudioTrack.ERROR_INVALID_OPERATION);

        //-------- tear down      --------------

        track.release();

    }

frameworks\base\media\java\android\media\AudioTrack.java

    /**

     * Writes the audio data to the audio sink for playback (streaming mode),

     * or copies audio data for later playback (static buffer mode).

     * In static buffer mode, copies the data to the buffer starting at its 0 offset, and the write

     * mode is ignored.

     * In streaming mode, the blocking behavior will depend on the write mode.

     * @param audioData the buffer that holds the data to play, starting at the position reported

     *     by audioData.position().

     *     Note that upon return, the buffer position (audioData.position()) will

     *     have been advanced to reflect the amount of data that was successfully written to

     *     the AudioTrack.

     * @param sizeInBytes number of bytes to write.

     *     Note this may differ from audioData.remaining(), but cannot exceed it.

     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no

     *     effect in static mode.

     *     With {@link #WRITE_BLOCKING}, the write will block until all data has been written

     *         to the audio sink.

     *     With {@link #WRITE_NON_BLOCKING}, the write will return immediately after

     *     queuing as much audio data for playback as possible without blocking.

     * @return 0 or a positive number of bytes that were written, or

     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}

     */

    public int write(ByteBuffer audioData, int sizeInBytes,

            @WriteMode int writeMode) {

        if (mState == STATE_UNINITIALIZED) {

            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");

            return ERROR_INVALID_OPERATION;

        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {

            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");

            return ERROR_BAD_VALUE;

        }

        if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {

            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");

            return ERROR_BAD_VALUE;

        }

        int ret = 0;

        if (audioData.isDirect()) {

            ret = native_write_native_bytes(audioData,

                    audioData.position(), sizeInBytes, mAudioFormat,

                    writeMode == WRITE_BLOCKING);

        } else {

            ret = native_write_byte(NioUtils.unsafeArray(audioData),

                    NioUtils.unsafeArrayOffset(audioData) + audioData.position(),

                    sizeInBytes, mAudioFormat,

                    writeMode == WRITE_BLOCKING);

        }

        if ((mDataLoadMode == MODE_STATIC)

                && (mState == STATE_NO_STATIC_DATA)

                && (ret > 0)) {

            // benign race with respect to other APIs that read mState

            mState = STATE_INITIALIZED;

        }

        if (ret > 0) {

            audioData.position(audioData.position() + ret);

        }

        return ret;

    }

(The JNI gMethods table shown above already maps native_write_byte and native_write_native_bytes to android_media_AudioTrack_write_byte and android_media_AudioTrack_write_native_bytes, so it is not repeated here.)

frameworks\base\core\jni\android_media_AudioTrack.cpp

// ----------------------------------------------------------------------------

static jint android_media_AudioTrack_write_byte(JNIEnv *env,  jobject thiz,

                                                  jbyteArray javaAudioData,

                                                  jint offsetInBytes, jint sizeInBytes,

                                                  jint javaAudioFormat,

                                                  jboolean isWriteBlocking) {

    //ALOGV("android_media_AudioTrack_write_byte(offset=%d, sizeInBytes=%d) called",

    //    offsetInBytes, sizeInBytes);

    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);  // convert the Java object into the C++ object

    if (lpTrack == NULL) {

        jniThrowException(env, "java/lang/IllegalStateException",

            "Unable to retrieve AudioTrack pointer for write()");

        return 0;

    }

    // get the pointer for the audio data from the java array

    // NOTE: We may use GetPrimitiveArrayCritical() when the JNI implementation changes in such

    // a way that it becomes much more efficient. When doing so, we will have to prevent the

    // AudioSystem callback to be called while in critical section (in case of media server

    // process crash for instance)

    jbyte* cAudioData = NULL;

    if (javaAudioData) {

        cAudioData = (jbyte *)env->GetByteArrayElements(javaAudioData, NULL);

        if (cAudioData == NULL) {

            ALOGE("Error retrieving source of audio data to play, can't play");

            return 0; // out of memory or no data to load

        }

    } else {

        ALOGE("NULL java array of audio data to play, can't play");

        return 0;

    }

    jint written = writeToTrack(lpTrack, javaAudioFormat, cAudioData, offsetInBytes, sizeInBytes,

            isWriteBlocking == JNI_TRUE /* blocking */);

    env->ReleaseByteArrayElements(javaAudioData, cAudioData, 0);

    //ALOGV("write wrote %d (tried %d) bytes in the native AudioTrack with offset %d",

    //     (int)written, (int)(sizeInBytes), (int)offsetInBytes);

    return written;

}

frameworks\base\core\jni\android_media_AudioTrack.cpp

// ----------------------------------------------------------------------------

jint writeToTrack(const sp<AudioTrack>& track, jint audioFormat, const jbyte* data,

                  jint offsetInBytes, jint sizeInBytes, bool blocking = true) {

    // give the data to the native AudioTrack object (the data starts at the offset)

    ssize_t written = 0;

    // regular write() or copy the data to the AudioTrack's shared memory?

   // if the app did not provide shared memory, call write() to copy the data

    if (track->sharedBuffer() == 0) {

        written = track->write(data + offsetInBytes, sizeInBytes, blocking);

        // for compatibility with earlier behavior of write(), return 0 in this case

        if (written == (ssize_t) WOULD_BLOCK) {

            written = 0;

        }

    } else {

        const audio_format_t format = audioFormatToNative(audioFormat);

        switch (format) {

        default:

        case AUDIO_FORMAT_PCM_FLOAT:

        case AUDIO_FORMAT_PCM_16_BIT: {

            // writing to shared memory, check for capacity

            if ((size_t)sizeInBytes > track->sharedBuffer()->size()) {

                sizeInBytes = track->sharedBuffer()->size();

            }

// if the app provided shared memory, copy into it directly

            memcpy(track->sharedBuffer()->pointer(), data + offsetInBytes, sizeInBytes);

            written = sizeInBytes;

            } break;

        case AUDIO_FORMAT_PCM_8_BIT: {

            // data contains 8bit data we need to expand to 16bit before copying

            // to the shared memory

            // writing to shared memory, check for capacity,

            // note that input data will occupy 2X the input space due to 8 to 16bit conversion

            if (((size_t)sizeInBytes)*2 > track->sharedBuffer()->size()) {

                sizeInBytes = track->sharedBuffer()->size() / 2;

            }

            int count = sizeInBytes;

            int16_t *dst = (int16_t *)track->sharedBuffer()->pointer();

            const uint8_t *src = (const uint8_t *)(data + offsetInBytes);

            memcpy_to_i16_from_u8(dst, src, count);

            // even though we wrote 2*sizeInBytes, we only report sizeInBytes as written to hide

            // the 8bit mixer restriction from the user of this function

            written = sizeInBytes;

            } break;

        }

    }

    return written;

}

frameworks\av\media\libmedia\AudioTrack.cpp

// -------------------------------------------------------------------------

ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)

{

    if (mTransfer != TRANSFER_SYNC || mIsTimed) {

        return INVALID_OPERATION;

    }

    if (isDirect()) {

        AutoMutex lock(mLock);

        int32_t flags = android_atomic_and(

                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),

                            &mCblk->mFlags);

        if (flags & CBLK_INVALID) {

            return DEAD_OBJECT;

        }

    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {

        // Sanity-check: user is most-likely passing an error code, and it would

        // make the return value ambiguous (actualSize vs error).

        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);

        return BAD_VALUE;

    }

    size_t written = 0;

    Buffer audioBuffer;

    while (userSize >= mFrameSize) {

        audioBuffer.frameCount = userSize / mFrameSize;

// obtain an empty buffer

        status_t err = obtainBuffer(&audioBuffer,

                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);

        if (err < 0) {

            if (written > 0) {

                break;

            }

            return ssize_t(err);

        }

        size_t toWrite;

        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {

            // Divide capacity by 2 to take expansion into account

            toWrite = audioBuffer.size >> 1;

            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);

        } else {

            toWrite = audioBuffer.size;

            memcpy(audioBuffer.i8, buffer, toWrite);

        }

        buffer = ((const char *) buffer) + toWrite;

        userSize -= toWrite;

        written += toWrite;

        releaseBuffer(&audioBuffer);

    }

    return written;

}

frameworks\av\services\audioflinger\Tracks.cpp

// AudioBufferProvider interface

status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(

        AudioBufferProvider::Buffer* buffer, int64_t pts __unused)

{

    ServerProxy::Buffer buf;

    size_t desiredFrames = buffer->frameCount;

    buf.mFrameCount = desiredFrames;

  // obtain the data and consume it; note there is no releaseBuffer call in this function

    status_t status = mServerProxy->obtainBuffer(&buf);

    buffer->frameCount = buf.mFrameCount;

    buffer->raw = buf.mRaw;

    if (buf.mFrameCount == 0) {

        mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);

    }

    return status;

}

// releaseBuffer() is not overridden

// ExtendedAudioBufferProvider interface

frameworks\av\services\audioflinger\Tracks.cpp

// AudioBufferProvider interface

// getNextBuffer() = 0;

// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack

void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)

{

#ifdef TEE_SINK

    if (mTeeSink != 0) {

        (void) mTeeSink->write(buffer->raw, buffer->frameCount);

    }

#endif

    ServerProxy::Buffer buf;

    buf.mFrameCount = buffer->frameCount;

    buf.mRaw = buffer->raw;

    buffer->frameCount = 0;

    buffer->raw = NULL;

    // the base class TrackBase releases the buffer via mServerProxy->releaseBuffer

    mServerProxy->releaseBuffer(&buf);

}

frameworks\av\services\audioflinger\Tracks.cpp

// ----------------------------------------------------------------------------

// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held

AudioFlinger::PlaybackThread::Track::Track(

            PlaybackThread *thread,

            const sp<Client>& client,

            audio_stream_type_t streamType,

            uint32_t sampleRate,

            audio_format_t format,

            audio_channel_mask_t channelMask,

            size_t frameCount,

            void *buffer,

            const sp<IMemory>& sharedBuffer,

            int sessionId,

            int uid,

            IAudioFlinger::track_flags_t flags,

            track_type type)

    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount,

                  (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,

                  sessionId, uid, flags, true /*isOut*/,

                  (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,

                  type),

    mFillingUpStatus(FS_INVALID),

    // mRetryCount initialized later when needed

    mSharedBuffer(sharedBuffer),

    mStreamType(streamType),

    mName(-1),  // see note below

    mMainBuffer(thread->mixBuffer()),

    mAuxBuffer(NULL),

    mAuxEffectId(0), mHasVolumeController(false),

    mPresentationCompleteFrames(0),

    mFastIndex(-1),

    mCachedVolume(1.0),

    mIsInvalid(false),

    mAudioTrackServerProxy(NULL),

    mResumeToStopping(false),

    mFlushHwPending(false),

    mPreviousValid(false),

    mPreviousFramesWritten(0)

    // mPreviousTimestamp

{

    // client == 0 implies sharedBuffer == 0

    ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),

            sharedBuffer->size());

    if (mCblk == NULL) {

        return;

    }

    if (sharedBuffer == 0) {

    // if the app did not create the buffer, an AudioTrackServerProxy is created to manage the (thread-created) buffer

        mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,

                mFrameSize, !isExternalTrack(), sampleRate);

    } else {

// if the app created the buffer, a StaticAudioTrackServerProxy manages it

        mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,

                mFrameSize);

    }

    mServerProxy = mAudioTrackServerProxy;

    mName = thread->getTrackName_l(channelMask, format, sessionId);

    if (mName < 0) {

        ALOGE("no more track names available");

        return;

    }

    // only allocate a fast track index if we were able to allocate a normal track name

    if (flags & IAudioFlinger::TRACK_FAST) {

        mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();

        ALOG_ASSERT(thread->mFastTrackAvailMask != 0);

        int i = __builtin_ctz(thread->mFastTrackAvailMask);

        ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);

        // FIXME This is too eager.  We allocate a fast track index before the

        //       fast track becomes active.  Since fast tracks are a scarce resource,

        //       this means we are potentially denying other more important fast tracks from

        //       being created.  It would be better to allocate the index dynamically.

        mFastIndex = i;

        // Read the initial underruns because this field is never cleared by the fast mixer

        mObservedUnderruns = thread->getFastTrackUnderruns(i);

        thread->mFastTrackAvailMask &= ~(1 << i);

    }

}

frameworks\av\media\libmedia\AudioTrackShared.cpp

Initial values: mPosition = 0, mEnd = frameCount

// ---------------------------------------------------------------------------

StaticAudioTrackServerProxy::StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers,

        size_t frameCount, size_t frameSize)

    : AudioTrackServerProxy(cblk, buffers, frameCount, frameSize),

      mObserver(&cblk->u.mStatic.mSingleStateQueue), mPosition(0),

      mEnd(frameCount), mFramesReadyIsCalledByMultipleThreads(false)

{

    mState.mLoopStart = 0;

    mState.mLoopEnd = 0;

    mState.mLoopCount = 0;

}

frameworks\av\media\libmedia\AudioTrackShared.cpp

obtainBuffer returns the region starting at the position indicated by mPosition:

status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush __unused)

{

    if (mIsShutdown) {

        buffer->mFrameCount = 0;

        buffer->mRaw = NULL;

        buffer->mNonContig = 0;

        mUnreleased = 0;

        return NO_INIT;

    }

  // get the current data position

    ssize_t positionOrStatus = pollPosition();

    if (positionOrStatus < 0) {

        buffer->mFrameCount = 0;

        buffer->mRaw = NULL;

        buffer->mNonContig = 0;

        mUnreleased = 0;

        return (status_t) positionOrStatus;

    }

    size_t position = (size_t) positionOrStatus;

    size_t avail;

    if (position < mEnd) {

        avail = mEnd - position;

        size_t wanted = buffer->mFrameCount;

        if (avail < wanted) {

            buffer->mFrameCount = avail;

        } else {

            avail = wanted;

        }

        buffer->mRaw = &((char *) mBuffers)[position * mFrameSize];

    } else {

        avail = 0;

        buffer->mFrameCount = 0;

        buffer->mRaw = NULL;

    }

    buffer->mNonContig = 0;     // FIXME should be > 0 for looping

    mUnreleased = avail;

    return NO_ERROR;

}

frameworks\av\include\private\media\AudioTrackShared.h

    struct Buffer {

        size_t  mFrameCount;            // number of frames available in this buffer (on input: how many frames are requested)

        void*   mRaw;                   // pointer to the first frame

        size_t  mNonContig;             // number of additional non-contiguous frames available

    };

frameworks\av\media\libmedia\AudioTrackShared.cpp

void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)

{

    size_t stepCount = buffer->mFrameCount;

    LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased));

    if (stepCount == 0) {

        // prevent accidental re-use of buffer

        buffer->mRaw = NULL;

        buffer->mNonContig = 0;

        return;

    }

    mUnreleased -= stepCount;

    audio_track_cblk_t* cblk = mCblk;

    size_t position = mPosition;

    size_t newPosition = position + stepCount;

    int32_t setFlags = 0;

    if (!(position <= newPosition && newPosition <= mFrameCount)) {

        ALOGW("%s newPosition %zu outside [%zu, %zu]", __func__, newPosition, position, mFrameCount);

        newPosition = mFrameCount;

    } else if (mState.mLoopCount != 0 && newPosition == mState.mLoopEnd) {

        if (mState.mLoopCount == -1 || --mState.mLoopCount != 0) {

            newPosition = mState.mLoopStart;

            setFlags = CBLK_LOOP_CYCLE;

        } else {

            mEnd = mFrameCount;     // this is what allows playback to continue after the loop

            setFlags = CBLK_LOOP_FINAL;

        }

    }

    if (newPosition == mFrameCount) {

        setFlags |= CBLK_BUFFER_END;

    }

  // advance to the new position

    mPosition = newPosition;

    cblk->mServer += stepCount;

    // This may overflow, but client is not supposed to rely on it

    cblk->u.mStatic.mBufferPosition = (uint32_t) newPosition;

    if (setFlags != 0) {

        (void) android_atomic_or(setFlags, &cblk->mFlags);

        // this would be a good place to wake a futex

    }

    buffer->mFrameCount = 0;

    buffer->mRaw = NULL;

    buffer->mNonContig = 0;

}
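
Taken together, static-mode obtainBuffer/releaseBuffer degenerate the ring into a linear cursor: obtainBuffer hands out [mPosition, mEnd) and releaseBuffer advances mPosition. A tiny worked illustration with assumed values (frameCount = 100, chunks of 48 frames; not the real class):

#include <cstdio>

int main() {
    size_t frameCount = 100, position = 0, end = frameCount; // initial values
    // Each mixer pass: obtain up to 'wanted' frames, then release them.
    for (size_t wanted = 48; position < end; ) {
        size_t avail = end - position;                 // what obtainBuffer sees
        size_t got = (avail < wanted) ? avail : wanted;
        printf("obtain %zu frames at position %zu\n", got, position);
        position += got;                               // releaseBuffer advances
    }
    printf("CBLK_BUFFER_END at position %zu\n", position); // 48, 48, then 4
    return 0;
}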

frameworks\av\media\libmedia\AudioTrackShared.cpp

mFront: the read pointer (R)

mRear: the write pointer (W)

mFrameCount: the buffer length (LEN)

mFrameCountP2: LEN rounded up to the next power of 2 (a worked example of the index math follows)
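
A worked example of this free-running-counter arithmetic (assumed values for illustration: frameCount = 5, hence mFrameCountP2 = 8; the real fields live in audio_track_cblk_t):

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t frameCount   = 5;   // nominal ring size (LEN)
    const uint32_t frameCountP2 = 8;   // LEN rounded up to a power of two

    // Free-running counters, as in cblk->u.mStreaming; they simply wrap.
    uint32_t front = 0xfffffffeu;      // R pointer, about to wrap around
    uint32_t rear  = front + 3;        // W pointer: 3 frames written (wraps to 1)

    // Wrap-around is harmless: modular subtraction still yields 3.
    uint32_t filled = rear - front;
    printf("filled = %u\n", filled);                   // 3

    // Only the masked value is ever used to address the buffer.
    uint32_t rearIdx  = rear  & (frameCountP2 - 1);    // 1
    uint32_t frontIdx = front & (frameCountP2 - 1);    // 6
    printf("rearIdx = %u, frontIdx = %u\n", rearIdx, frontIdx);

    // First contiguous writable chunk at rear (cf. 'part1' in obtainBuffer):
    uint32_t avail = frameCount - filled;              // don't overfill: 2
    uint32_t part1 = frameCountP2 - rearIdx;           // room up to buffer end: 7
    if (part1 > avail) part1 = avail;                  // clamp to 2
    printf("avail = %u, part1 = %u\n", avail, part1);
    return 0;
}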

#define MEASURE_NS 10000000 // attempt to provide accurate timeouts if requested >= MEASURE_NS

// To facilitate quicker recovery from server failure, this value limits the timeout per each futex

// wait.  However it does not protect infinite timeouts.  If defined to be zero, there is no limit.

// FIXME May not be compatible with audio tunneling requirements where timeout should be in the

// order of minutes.

#define MAX_SEC    5

status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *requested,

        struct timespec *elapsed)

{

    LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);

    struct timespec total;          // total elapsed time spent waiting

    total.tv_sec = 0;

    total.tv_nsec = 0;

    bool measure = elapsed != NULL; // whether to measure total elapsed time spent waiting

    status_t status;

    enum {

        TIMEOUT_ZERO,       // requested == NULL || *requested == 0

        TIMEOUT_INFINITE,   // *requested == infinity

        TIMEOUT_FINITE,     // 0 < *requested < infinity

        TIMEOUT_CONTINUE,   // additional chances after TIMEOUT_FINITE

    } timeout;

    if (requested == NULL) {

        timeout = TIMEOUT_ZERO;

    } else if (requested->tv_sec == 0 && requested->tv_nsec == 0) {

        timeout = TIMEOUT_ZERO;

    } else if (requested->tv_sec == INT_MAX) {

        timeout = TIMEOUT_INFINITE;

    } else {

        timeout = TIMEOUT_FINITE;

        if (requested->tv_sec > 0 || requested->tv_nsec >= MEASURE_NS) {

            measure = true;

        }

    }

    struct timespec before;

    bool beforeIsValid = false;

    audio_track_cblk_t* cblk = mCblk;

    bool ignoreInitialPendingInterrupt = true;

    // check for shared memory corruption

    if (mIsShutdown) {

        status = NO_INIT;

        goto end;

    }

    for (;;) {

        int32_t flags = android_atomic_and(~CBLK_INTERRUPT, &cblk->mFlags);

        // check for track invalidation by server, or server death detection

        if (flags & CBLK_INVALID) {

            ALOGV("Track invalidated");

            status = DEAD_OBJECT;

            goto end;

        }

        // check for obtainBuffer interrupted by client

        if (!ignoreInitialPendingInterrupt && (flags & CBLK_INTERRUPT)) {

            ALOGV("obtainBuffer() interrupted by client");

            status = -EINTR;

            goto end;

        }

        ignoreInitialPendingInterrupt = false;

        // compute number of frames available to write (AudioTrack) or read (AudioRecord)

        int32_t front;   

        int32_t rear;

        if (mIsOut) {

            // The barrier following the read of mFront is probably redundant.

            // We're about to perform a conditional branch based on 'filled',

            // which will force the processor to observe the read of mFront

            // prior to allowing data writes starting at mRaw.

            // However, the processor may support speculative execution,

            // and be unable to undo speculative writes into shared memory.

            // The barrier will prevent such speculative execution.

            front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);  

            rear = cblk->u.mStreaming.mRear;

        } else {

            // On the other hand, this barrier is required.

            rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);

            front = cblk->u.mStreaming.mFront;

        }

        ssize_t filled = rear - front;

        // pipe should not be overfull

        if (!(0 <= filled && (size_t) filled <= mFrameCount)) {

            if (mIsOut) {

                ALOGE("Shared memory control block is corrupt (filled=%zd, mFrameCount=%zu); "

                        "shutting down", filled, mFrameCount);

                mIsShutdown = true;

                status = NO_INIT;

                goto end;

            }

            // for input, sync up on overrun

            filled = 0;

            cblk->u.mStreaming.mFront = rear;

            (void) android_atomic_or(CBLK_OVERRUN, &cblk->mFlags);

        }

        // don't allow filling pipe beyond the nominal size

        size_t avail = mIsOut ? mFrameCount - filled : filled;

        if (avail > 0) {

            // 'avail' may be non-contiguous, so return only the first contiguous chunk

            size_t part1;

            if (mIsOut) {

                rear &= mFrameCountP2 - 1;

                part1 = mFrameCountP2 - rear;

            } else {

                front &= mFrameCountP2 - 1;

                part1 = mFrameCountP2 - front;

            }

            if (part1 > avail) {

                part1 = avail;

            }

            if (part1 > buffer->mFrameCount) {

                part1 = buffer->mFrameCount;

            }

            buffer->mFrameCount = part1;

            buffer->mRaw = part1 > 0 ?

                    &((char *) mBuffers)[(mIsOut ? rear : front) * mFrameSize] : NULL;

            buffer->mNonContig = avail - part1;

            mUnreleased = part1;

            status = NO_ERROR;

            break;

        }

        struct timespec remaining;

        const struct timespec *ts;

        switch (timeout) {

        case TIMEOUT_ZERO:

            status = WOULD_BLOCK;

            goto end;

        case TIMEOUT_INFINITE:

            ts = NULL;

            break;

        case TIMEOUT_FINITE:

            timeout = TIMEOUT_CONTINUE;

            if (MAX_SEC == 0) {

                ts = requested;

                break;

            }

            // fall through

        case TIMEOUT_CONTINUE:

            // FIXME we do not retry if requested < 10ms? needs documentation on this state machine

            if (!measure || requested->tv_sec < total.tv_sec ||

                    (requested->tv_sec == total.tv_sec && requested->tv_nsec <= total.tv_nsec)) {

                status = TIMED_OUT;

                goto end;

            }

            remaining.tv_sec = requested->tv_sec - total.tv_sec;

            if ((remaining.tv_nsec = requested->tv_nsec - total.tv_nsec) < 0) {

                remaining.tv_nsec += 1000000000;

                remaining.tv_sec++;

            }

            if (0 < MAX_SEC && MAX_SEC < remaining.tv_sec) {

                remaining.tv_sec = MAX_SEC;

                remaining.tv_nsec = 0;

            }

            ts = &remaining;

            break;

        default:

            LOG_ALWAYS_FATAL("obtainBuffer() timeout=%d", timeout);

            ts = NULL;

            break;

        }

        int32_t old = android_atomic_and(~CBLK_FUTEX_WAKE, &cblk->mFutex);

        if (!(old & CBLK_FUTEX_WAKE)) {

            if (measure && !beforeIsValid) {

                clock_gettime(CLOCK_MONOTONIC, &before);

                beforeIsValid = true;

            }

            errno = 0;

            (void) syscall(__NR_futex, &cblk->mFutex,

                    mClientInServer ? FUTEX_WAIT_PRIVATE : FUTEX_WAIT, old & ~CBLK_FUTEX_WAKE, ts);

            // update total elapsed time spent waiting

            if (measure) {

                struct timespec after;

                clock_gettime(CLOCK_MONOTONIC, &after);

                total.tv_sec += after.tv_sec - before.tv_sec;

                long deltaNs = after.tv_nsec - before.tv_nsec;

                if (deltaNs < 0) {

                    deltaNs += 1000000000;

                    total.tv_sec--;

                }

                if ((total.tv_nsec += deltaNs) >= 1000000000) {

                    total.tv_nsec -= 1000000000;

                    total.tv_sec++;

                }

                before = after;

                beforeIsValid = true;

            }

            switch (errno) {

            case 0:            // normal wakeup by server, or by binderDied()

            case EWOULDBLOCK:  // benign race condition with server

            case EINTR:        // wait was interrupted by signal or other spurious wakeup

            case ETIMEDOUT:    // time-out expired

                // FIXME these error/non-0 status are being dropped

                break;

            default:

                status = errno;

                ALOGE("%s unexpected error %s", __func__, strerror(status));

                goto end;

            }

        }

    }

end:

    if (status != NO_ERROR) {

        buffer->mFrameCount = 0;

        buffer->mRaw = NULL;

        buffer->mNonContig = 0;

        mUnreleased = 0;

    }

    if (elapsed != NULL) {

        *elapsed = total;

    }

    if (requested == NULL) {

        requested = &kNonBlocking;

    }

    if (measure) {

        ALOGV("requested %ld.%03ld elapsed %ld.%03ld",

              requested->tv_sec, requested->tv_nsec / 1000000,

              total.tv_sec, total.tv_nsec / 1000000);

    }

    return status;

}

frameworks\av\media\libmedia\AudioTrackShared.cpp

void ClientProxy::releaseBuffer(Buffer* buffer)

{

    LOG_ALWAYS_FATAL_IF(buffer == NULL);

    size_t stepCount = buffer->mFrameCount;

    if (stepCount == 0 || mIsShutdown) {

        // prevent accidental re-use of buffer

        buffer->mFrameCount = 0;

        buffer->mRaw = NULL;

        buffer->mNonContig = 0;

        return;

    }

    LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased && mUnreleased <= mFrameCount));

    mUnreleased -= stepCount;

    audio_track_cblk_t* cblk = mCblk;

    // Both of these barriers are required

    if (mIsOut) {

        int32_t rear = cblk->u.mStreaming.mRear;

        android_atomic_release_store(stepCount + rear, &cblk->u.mStreaming.mRear);

    } else {

        int32_t front = cblk->u.mStreaming.mFront;

        android_atomic_release_store(stepCount + front, &cblk->u.mStreaming.mFront);

    }

}

4.11 PlaybackThread Processing Flow

a. prepareTracks_l:

   determine which tracks are enabled and which are disabled;

   for each enabled track, set up the parameters in mState.tracks[x]

b. threadLoop_mix: process the data (e.g. resampling) and mix

   determine the hooks:

   examine the data of each mState.tracks[x] and choose tracks[x].hook according to its format,

   then determine the overall mState.hook

   invoke the hooks:

   calling the overall mState.hook is enough; it in turn calls every mState.tracks[x].hook

   the mixed data is placed in the temporary buffer mState.outputTemp,

   then converted to the target format and stored into thread.mMixerBuffer

c. memcpy_by_audio_format:

   copy the data from thread.mMixerBuffer or thread.mEffectBuffer into thread.mSinkBuffer

d. threadLoop_write:

   write thread.mSinkBuffer to the sound card

e. threadLoop_exit (a condensed sketch of one loop iteration follows)
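
A condensed, self-contained sketch of one loop iteration covering steps a-d (the stubs and the name PlaybackThreadSketch are mine; the real logic lives in Threads.cpp and AudioMixer.cpp and is far more involved):

#include <cstddef>
#include <cstdio>

// Stub PlaybackThread sketch (illustrative only, not the AOSP classes).
struct PlaybackThreadSketch {
    float mMixerBuffer[256] = {};   // mixed PCM before format conversion
    short mSinkBuffer[256]  = {};   // device-format PCM handed to the HAL

    bool prepareTracks_l() {        // a. select enabled tracks, fill mState.tracks[x]
        printf("prepareTracks_l: configure enabled tracks\n");
        return true;                // pretend some tracks are ready
    }
    void threadLoop_mix() {         // b. per-track hooks resample/mix into mMixerBuffer
        printf("threadLoop_mix: hooks mix into mMixerBuffer\n");
    }
    void copyToSink() {             // c. stands in for memcpy_by_audio_format()
        for (size_t i = 0; i < 256; i++)
            mSinkBuffer[i] = (short)(mMixerBuffer[i] * 32767);
    }
    void threadLoop_write() {       // d. push mSinkBuffer to the audio HAL
        printf("threadLoop_write: write mSinkBuffer to the device\n");
    }
};

int main() {
    PlaybackThreadSketch t;
    if (t.prepareTracks_l())        // one iteration of the thread loop
        t.threadLoop_mix();
    t.copyToSink();
    t.threadLoop_write();
    return 0;                       // e. threadLoop_exit on shutdown
}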

The ring buffer mechanism in Android 4.4 KitKat:

http://blog.sina.com.cn/s/blog_4d2f77990102ux8m.html

"Deep Analysis of the Android 5.0 System", Chapter 6, Section 6.1: Atomic Operations:

https://yq.aliyun.com/articles/95441

Memory barriers:

http://ifeve.com/memory-barriers-or-fences/

frameworks\av\services\audioflinger\AudioMixer.cpp

For example, when the phone is muted, nothing needs to be processed:

// no-op case

void AudioMixer::process__nop(state_t* state, int64_t pts)

{

    ALOGVV("process__nop\n");

    uint32_t e0 = state->enabledTracks;

    while (e0) {

        // process by group of tracks with same output buffer to

        // avoid multiple memset() on same buffer

        uint32_t e1 = e0, e2 = e0;

        int i = 31 - __builtin_clz(e1);

        {

            track_t& t1 = state->tracks[i];

            e2 &= ~(1<<i);

            while (e2) {

                i = 31 - __builtin_clz(e2);

                e2 &= ~(1<<i);

                track_t& t2 = state->tracks[i];

                if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {

                    e1 &= ~(1<<i);

                }

            }

            e0 &= ~(e1);

            memset(t1.mainBuffer, 0, state->frameCount * t1.mMixerChannelCount

                    * audio_bytes_per_sample(t1.mMixerFormat));

        }

        while (e1) {

            i = 31 - __builtin_clz(e1);

            e1 &= ~(1<<i);

            {

                track_t& t3 = state->tracks[i];

                size_t outFrames = state->frameCount;

                while (outFrames) {

                    t3.buffer.frameCount = outFrames;

                    int64_t outputPTS = calculateOutputPTS(

                        t3, pts, state->frameCount - outFrames);

                    t3.bufferProvider->getNextBuffer(&t3.buffer, outputPTS);

                    if (t3.buffer.raw == NULL) break;

                    outFrames -= t3.buffer.frameCount;

                    t3.bufferProvider->releaseBuffer(&t3.buffer);

                }

            }

        }

    }

}

frameworks\av\services\audioflinger\AudioMixer.cpp

If the data is already in a format the sound card supports, no resampling is needed:

// generic code without resampling
void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts)
{
    ALOGVV("process__genericNoResampling\n");
    int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));

    // acquire each track's buffer
    uint32_t enabledTracks = state->enabledTracks;
    uint32_t e0 = enabledTracks;
    while (e0) {
        const int i = 31 - __builtin_clz(e0);
        e0 &= ~(1<<i);
        track_t& t = state->tracks[i];
        t.buffer.frameCount = state->frameCount;
        t.bufferProvider->getNextBuffer(&t.buffer, pts);
        t.frameCount = t.buffer.frameCount;
        t.in = t.buffer.raw;
    }

    e0 = enabledTracks;
    while (e0) {
        // process by group of tracks with same output buffer to
        // optimize cache use
        uint32_t e1 = e0, e2 = e0;
        int j = 31 - __builtin_clz(e1);
        track_t& t1 = state->tracks[j];
        e2 &= ~(1<<j);
        while (e2) {
            j = 31 - __builtin_clz(e2);
            e2 &= ~(1<<j);
            track_t& t2 = state->tracks[j];
            if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
                e1 &= ~(1<<j);
            }
        }
        e0 &= ~(e1);
        // this assumes output 16 bits stereo, no resampling
        int32_t *out = t1.mainBuffer;
        size_t numFrames = 0;
        do {
            memset(outTemp, 0, sizeof(outTemp));
            e2 = e1;
            while (e2) {
                const int i = 31 - __builtin_clz(e2);
                e2 &= ~(1<<i);
                track_t& t = state->tracks[i];
                size_t outFrames = BLOCKSIZE;
                int32_t *aux = NULL;
                if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
                    aux = t.auxBuffer + numFrames;
                }
                while (outFrames) {
                    // t.in == NULL can happen if the track was flushed just after having
                    // been enabled for mixing.
                    if (t.in == NULL) {
                        enabledTracks &= ~(1<<i);
                        e1 &= ~(1<<i);
                        break;
                    }
                    size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
                    if (inFrames > 0) {
                        t.hook(&t, outTemp + (BLOCKSIZE - outFrames) * t.mMixerChannelCount,
                                inFrames, state->resampleTemp, aux);
                        t.frameCount -= inFrames;
                        outFrames -= inFrames;
                        if (CC_UNLIKELY(aux != NULL)) {
                            aux += inFrames;
                        }
                    }
                    if (t.frameCount == 0 && outFrames) {
                        t.bufferProvider->releaseBuffer(&t.buffer);
                        t.buffer.frameCount = (state->frameCount - numFrames) -
                                (BLOCKSIZE - outFrames);
                        int64_t outputPTS = calculateOutputPTS(
                            t, pts, numFrames + (BLOCKSIZE - outFrames));
                        t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
                        t.in = t.buffer.raw;
                        if (t.in == NULL) {
                            enabledTracks &= ~(1<<i);
                            e1 &= ~(1<<i);
                            break;
                        }
                        t.frameCount = t.buffer.frameCount;
                    }
                }
            }

            convertMixerFormat(out, t1.mMixerFormat, outTemp, t1.mMixerInFormat,
                    BLOCKSIZE * t1.mMixerChannelCount);
            // TODO: fix ugly casting due to choice of out pointer type
            out = reinterpret_cast<int32_t*>((uint8_t*)out
                    + BLOCKSIZE * t1.mMixerChannelCount
                        * audio_bytes_per_sample(t1.mMixerFormat));
            numFrames += BLOCKSIZE;
        } while (numFrames < state->frameCount);
    }

    // release each track's buffer
    e0 = enabledTracks;
    while (e0) {
        const int i = 31 - __builtin_clz(e0);
        e0 &= ~(1<<i);
        track_t& t = state->tracks[i];
        t.bufferProvider->releaseBuffer(&t.buffer);
    }
}
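Conceptually, each 16-bit track hook accumulates samples into the wider int32 outTemp so that several tracks can be summed without overflow, and convertMixerFormat then narrows the result back to the sink format. The sketch below is illustrative only: the real hooks work in Q4.27/Q19.12 fixed point with per-channel volume ramps, and mixTrack/convertToPcm16 are made-up names. It shows the accumulate-then-clamp idea:

// Accumulate int16 tracks into int32, then clamp back to int16 (sketch only).
#include <cstdint>
#include <cstddef>

static inline int16_t clamp16(int32_t sample) {
    if (sample < -32768) return -32768;
    if (sample >  32767) return  32767;
    return (int16_t)sample;
}

// Mix one track's int16 samples into the int32 accumulator (gain in Q12).
void mixTrack(int32_t* acc, const int16_t* in, size_t count, int32_t gainQ12) {
    for (size_t k = 0; k < count; k++)
        acc[k] += (in[k] * gainQ12) >> 12;
}

// After all tracks are mixed, convert the accumulator to the sink format.
void convertToPcm16(int16_t* out, const int32_t* acc, size_t count) {
    for (size_t k = 0; k < count; k++)
        out[k] = clamp16(acc[k]);
}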

frameworks\av\services\audioflinger\AudioMixer.cpp

// generic code with resampling
void AudioMixer::process__genericResampling(state_t* state, int64_t pts)
{
    ALOGVV("process__genericResampling\n");
    // this const just means that local variable outTemp doesn't change
    int32_t* const outTemp = state->outputTemp;
    size_t numFrames = state->frameCount;

    uint32_t e0 = state->enabledTracks;
    while (e0) {
        // process by group of tracks with same output buffer
        // to optimize cache use
        uint32_t e1 = e0, e2 = e0;
        int j = 31 - __builtin_clz(e1);
        track_t& t1 = state->tracks[j];
        e2 &= ~(1<<j);
        while (e2) {
            j = 31 - __builtin_clz(e2);
            e2 &= ~(1<<j);
            track_t& t2 = state->tracks[j];
            if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
                e1 &= ~(1<<j);
            }
        }
        e0 &= ~(e1);
        int32_t *out = t1.mainBuffer;
        memset(outTemp, 0, sizeof(*outTemp) * t1.mMixerChannelCount * state->frameCount);
        while (e1) {
            const int i = 31 - __builtin_clz(e1);
            e1 &= ~(1<<i);
            track_t& t = state->tracks[i];
            int32_t *aux = NULL;
            if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
                aux = t.auxBuffer;
            }

            // this is a little goofy, on the resampling case we don't
            // acquire/release the buffers because it's done by
            // the resampler.
            if (t.needs & NEEDS_RESAMPLE) {
                t.resampler->setPTS(pts);
                t.hook(&t, outTemp, numFrames, state->resampleTemp, aux);
            } else {
                size_t outFrames = 0;
                while (outFrames < numFrames) {
                    t.buffer.frameCount = numFrames - outFrames;
                    int64_t outputPTS = calculateOutputPTS(t, pts, outFrames);
                    t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
                    t.in = t.buffer.raw;
                    // t.in == NULL can happen if the track was flushed just after having
                    // been enabled for mixing.
                    if (t.in == NULL) break;
                    if (CC_UNLIKELY(aux != NULL)) {
                        aux += outFrames;
                    }
                    t.hook(&t, outTemp + outFrames * t.mMixerChannelCount, t.buffer.frameCount,
                            state->resampleTemp, aux);
                    outFrames += t.buffer.frameCount;
                    t.bufferProvider->releaseBuffer(&t.buffer);
                }
            }
        }
        convertMixerFormat(out, t1.mMixerFormat,
                outTemp, t1.mMixerInFormat, numFrames * t1.mMixerChannelCount);
    }
}
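When NEEDS_RESAMPLE is set, the track's hook delegates to t.resampler, which pulls input through the bufferProvider itself. AudioFlinger ships several AudioResampler implementations (linear, cubic, sinc); the sketch below (resampleLinear is a made-up name, illustrative only) shows the simplest linear-interpolation idea, driven by a 2^32-based phase accumulator much like the dPhi used in the shared-memory test program:

// Minimal mono linear-interpolation resampler sketch (not AudioFlinger code).
#include <cstdint>
#include <cstddef>

size_t resampleLinear(int16_t* out, size_t outFrames,
                      const int16_t* in, size_t inFrames,
                      uint32_t inRate, uint32_t outRate) {
    uint64_t phase = 0;                               // Q32 fractional position
    uint64_t step  = ((uint64_t)inRate << 32) / outRate; // input advance per output frame
    size_t produced = 0;
    while (produced < outFrames) {
        size_t idx = (size_t)(phase >> 32);           // integer part: input index
        if (idx + 1 >= inFrames) break;               // ran out of input
        uint32_t frac = (uint32_t)phase;              // fractional part, in [0, 2^32)
        int32_t a = in[idx], b = in[idx + 1];
        // interpolate between the two neighboring input samples
        out[produced++] = (int16_t)(a + (((int64_t)(b - a) * frac) >> 32));
        phase += step;
    }
    return produced;                                  // frames actually written
}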
