Android audio output device selection process analysis (Part 1)

As every audio person knows (insert a laughing-crying emoji here), to play a sound you create an AudioTrack; when its set function configures the parameters, the createTrack_l function gets called.
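Before diving in, here is a minimal sketch of that entry point from the native side, just to anchor the discussion. This is illustrative only: AudioTrack::set() takes many more (defaulted) parameters and its exact signature varies across Android versions.

// A hedged sketch of the native entry point described above.
#include <media/AudioTrack.h>

using android::AudioTrack;
using android::sp;
using android::status_t;

status_t playToneSketch() {
    sp<AudioTrack> track = new AudioTrack();
    // set() validates the parameters and internally calls createTrack_l(),
    // which is where the output-selection walk in this article begins.
    status_t status = track->set(
            AUDIO_STREAM_MUSIC,          // stream type drives strategy selection
            48000,                       // sample rate
            AUDIO_FORMAT_PCM_16_BIT,     // format
            AUDIO_CHANNEL_OUT_STEREO);   // channel mask
    if (status != android::NO_ERROR) return status;
    track->start();                      // eventually reaches startOutput()
    return android::NO_ERROR;
}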

The first step: get output

createTrack_l uses Binder to call AudioFlinger::createTrack, but before the AudioFlinger::Track is created, it first calls getOutputForAttr:

status = AudioSystem::getOutputForAttr(attr, &output,
                                           mSessionId, &streamType, mClientUid,
                                           &config,
                                           mFlags, &mRoutedDeviceId, &mPortId);

Based on the incoming audio_attributes_t and the other parameters, it brings back `output` (note that this is an output parameter!).

This call is implemented in AudioPolicyManager.

AudioPolicyManager::getOutputForAttr:


// Prepare the attributes
...
// Select a strategy based on the attributes
routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
...
// Select a device based on the strategy
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
...
// Get an output for the selected device
*output = getOutputForDevice(device, session, *stream,
                                    config->sample_rate,
                                    config->format,
                                    config->channel_mask,
                                    flags,
                                    &config->offload_info);

The name is nicely chosen: you can feel it pushing forward one step at a time.

getOutputForAttr: attr >> strategy >> device >> output

Of these steps, getDeviceForStrategy is the one that really decides which device gets used.

For example, the code below shows the strategy selected when playing music (AUDIO_STREAM_MUSIC):

// Excerpt from Engine::getDeviceForStrategyInt
case STRATEGY_MEDIA: {
        uint32_t device2 = AUDIO_DEVICE_NONE;
        ...
        // device2 was not selected above, and FORCE_NO_BT_A2DP has not been set
        // (setBluetoothA2dpOn(false) sets the AudioSystem.FOR_MEDIA,
        // AudioSystem.FORCE_NO_BT_A2DP flags via setForceUse)
        if ((device2 == AUDIO_DEVICE_NONE) &&
                (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                (outputs.getA2dpOutput() != 0)) {
            // Here comes the first priority: if A2DP is available now, it wins,
            // and we jump straight to the special case marked with **** below
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
            if (device2 == AUDIO_DEVICE_NONE) {
                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
            }
            if (device2 == AUDIO_DEVICE_NONE) {
                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
            }
        }
        // Now for the AudioSystem.FOR_MEDIA, AudioSystem.FORCE_SPEAKER case: the speaker wins
        if ((device2 == AUDIO_DEVICE_NONE) &&
            (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] == AUDIO_POLICY_FORCE_SPEAKER)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        }
        // From here on devices are picked by priority. The rule:
        // once one is selected, stop, and go combine it with the
        // special co-existing devices below
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_HEADSET;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
        }
        if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
            // no sonification on aux digital (e.g. HDMI)
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
                (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        }
  ***********************************************************************************
        // The special case
        int device3 = AUDIO_DEVICE_NONE;
        if (strategy == STRATEGY_MEDIA) {
            // If ARC, SPDIF or AUX_LINE are available, assign them to device3
            // ARC, SPDIF and AUX_LINE can co-exist with others.
            device3 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HDMI_ARC;
            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPDIF);
            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_LINE);
        }
        // Let device2 co-exist with ARC/SPDIF/AUX_LINE
        device2 |= device3;
        // In the common case device is still AUDIO_DEVICE_NONE at this point
        device |= device2;
        // If hdmi system audio mode is on, remove speaker out of output list.
        if ((strategy == STRATEGY_MEDIA) &&
            (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
                AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
        }
        } break;

A brief summary based on the code above:

the device priority when playing music is as follows (a condensed sketch of the selection pattern follows the list).


AUDIO_DEVICE_OUT_BLUETOOTH_A2DP
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES (ordinary Bluetooth headphones)
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER (small Bluetooth speaker)
// setForceUse can force its way into the queue here
(if FORCE_SPEAKER) AUDIO_DEVICE_OUT_SPEAKER (speaker)
AUDIO_DEVICE_OUT_WIRED_HEADPHONE (plain headphones: listen only, no playback controls)
AUDIO_DEVICE_OUT_LINE
AUDIO_DEVICE_OUT_WIRED_HEADSET (wired headset with controls)
AUDIO_DEVICE_OUT_USB_HEADSET (USB headset)
...
AUDIO_DEVICE_OUT_SPEAKER (speaker)
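To make the rule concrete, here is a small sketch of the pattern the whole if-chain boils down to: walk an ordered priority table and take the first device type present in the availability bitmask. This is my own condensed illustration, not AOSP code; it assumes the AUDIO_DEVICE_OUT_* constants from system/audio.h.

#include <cstdint>
#include <system/audio.h>  // AUDIO_DEVICE_OUT_* / AUDIO_DEVICE_NONE

static const uint32_t kMediaPriority[] = {
    AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
    AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES,
    AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER,
    AUDIO_DEVICE_OUT_WIRED_HEADPHONE,
    AUDIO_DEVICE_OUT_LINE,
    AUDIO_DEVICE_OUT_WIRED_HEADSET,
    AUDIO_DEVICE_OUT_USB_HEADSET,
    AUDIO_DEVICE_OUT_SPEAKER,
};

uint32_t pickMediaDevice(uint32_t availableOutputDevicesType) {
    for (uint32_t candidate : kMediaPriority) {
        uint32_t hit = availableOutputDevicesType & candidate;
        if (hit != AUDIO_DEVICE_NONE) return hit;  // first match wins
    }
    return AUDIO_DEVICE_NONE;
}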

After the device has been selected and returned in device, the next stop is getOutputForDevice.

AudioPolicyManager::getOutputForDevice

// Handle the incoming flags
if ((flags & AUDIO_OUTPUT_FLAG_XXX) != 0) {
    flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_XXX);
}
// We are usually not AUDIO_OUTPUT_FLAG_DIRECT, so of course:
goto non_direct_output;
...
// For music playback and the like, the mediaplayer has already done the
// decoding, so this is generally PCM
    if (audio_is_linear_pcm(format)) {
        // Get the outputs matching the given stream type. The actual routing
        // change has to wait until startOutput is called.
        // Note: this function is getOutputsForDevice; it fetches several
        // outputs, while the function we are discussing fetches ONE output.
        // These outputs come from mOutputs. So where does mOutputs come from?
        SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
        // Pick one output from the matched outputs (note the plural)
        output = selectOutput(outputs, flags, format);
    }

First, let's settle the question: where does mOutputs come from?

void AudioPolicyManager::addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc)
{
    outputDesc->setIoHandle(output);
    mOutputs.add(output, outputDesc);
    updateMono(output); // update mono status when adding to output list
    selectOutputForMusicEffects();
    nextAudioPortGeneration();
}
// Where is this called?
AudioPolicyManager::AudioPolicyManager(){
  ...
    addOutput(output, outputDesc);
  ...
}
// Yes, now I remember: right after audio_policy.conf has been parsed!
// It iterates over mHwModules[i]->mOutputProfiles and adds them to mOutputs

Continue. After some outputs have been matched from mOutputs, selectOutput picks the one we really need.

selectOutput selection rules:


// select one output among several that provide a path to a particular device or set of
    // devices (the list was previously built by getOutputsForDevice()).
    // The priority is as follows:
    // 1: the output with the highest number of requested policy flags
    // 2: the output with the bit depth the closest to the requested one
    // 3: the primary output
    // 4: the first output in the list
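As an illustration of those four rules, here is a hedged sketch of the selection loop; this is my own simplification, not the AOSP body of selectOutput:

#include <vector>

struct OutputCandidate {
    int matchingFlagCount;   // rule 1: requested policy flags it satisfies
    int bitDepthDistance;    // rule 2: |candidate depth - requested depth|
    bool isPrimary;          // rule 3
};

size_t selectOutputSketch(const std::vector<OutputCandidate>& outputs) {
    size_t best = 0;
    for (size_t i = 1; i < outputs.size(); i++) {
        const OutputCandidate& a = outputs[i];
        const OutputCandidate& b = outputs[best];
        if (a.matchingFlagCount != b.matchingFlagCount) {        // rule 1
            if (a.matchingFlagCount > b.matchingFlagCount) best = i;
        } else if (a.bitDepthDistance != b.bitDepthDistance) {   // rule 2
            if (a.bitDepthDistance < b.bitDepthDistance) best = i;
        } else if (a.isPrimary != b.isPrimary) {                 // rule 3
            if (a.isPrimary) best = i;
        }                                            // rule 4: keep the earlier one
    }
    return best;  // index of the chosen output
}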

Then the selected output is returned.

And with that, the whole of getOutputForAttr is done.

The second step: use the output

After the output is obtained, it has to be put to use.

First:


// Excerpt from AudioTrack::createTrack_l()
...
AudioSystem::getLatency(output, &mAfLatency);
...
AudioSystem::getFrameCount(output, &mAfFrameCount);
...
AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
...
AudioSystem::getSamplingRate(output, &mAfSampleRate);

Note the parameters in the calls above: the first is an input parameter, the second an output parameter!

This gets us four values: mAfLatency, mAfFrameCount, afFrameCountHAL, and mAfSampleRate. Note that the "Af" in all of them indicates a setting of this output on the AudioFlinger side. They are then corrected and adjusted into mSampleRate and temp (a frame count computed jointly from mAfFrameCount, mAfSampleRate, and mAfLatency).
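As a rough illustration of that "joint computation", here is a sketch based on AudioTrack's classic minimum-buffer heuristic; the real createTrack_l math differs across versions, so treat the names and formula here as assumptions:

#include <cstddef>
#include <cstdint>

size_t minFrameCountSketch(uint32_t afLatencyMs,
                           size_t afFrameCount,
                           uint32_t afSampleRate) {
    // How long one AudioFlinger-side buffer lasts, in milliseconds
    uint32_t afBufferMs = (1000 * afFrameCount) / afSampleRate;
    if (afBufferMs == 0) afBufferMs = 1;  // guard against tiny buffers
    // Enough buffers to cover the reported mixer latency, at least double-buffered
    uint32_t minBufCount = afLatencyMs / afBufferMs;
    if (minBufCount < 2) minBufCount = 2;
    return minBufCount * afFrameCount;
}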

Then


sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      mSampleRate,
                                                      mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &flags,
                                                      mSharedBuffer,
                                                      output,
                                                      mClientPid,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status,
                                                      mPortId);

Let's jump over to the AudioFlinger side and take a look:


//AudioFlinger::createTrack
...
PlaybackThread *thread = checkPlaybackThread_l(output);
...

First, checkPlaybackThread_l retrieves the playback thread corresponding to output from mPlaybackThreads.

Here we need to review a piece of knowledge analyzed earlier.

When AudioPolicyManager is constructed, it calls mpClientInterface->openOutput once for each entry configured in audio_policy.conf (which ultimately calls AudioFlinger::openOutput_l).

Inside it:


//AudioFlinger::openOutput_l
// Find the matching AudioHwDevice
AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);
// Open the hardware output stream
status_t status = outHwDev->openOutputStream(
            &outputStream,
            *output,
            devices,
            flags,
            config,
            address.string());
// Create the PlaybackThread
thread = new (PlaybackThread subclass)((this, outputStream, *output, devices, mSystemReady))
// Associate the output with the PlaybackThread
mPlaybackThreads.add(*output, thread);

Let's do an experiment first:


Experimental steps:
1. adb shell dumpsys media.audio_flinger
2. Play music
3. adb shell dumpsys media.audio_flinger

When nothing is playing, the corresponding Output thread looks like this:


Output thread 0xe7d836c0 type 0 (MIXER):
  Thread name: AudioOut_1D
  I/O handle: 29
  TID: 1090
  Standby: yes
  Sample rate: 48000 Hz
  HAL frame count: 1920
  HAL format: 0x1 (pcm16)
  HAL buffer size: 7680 bytes
  Channel count: 2
  Channel mask: 0x00000003 (front-left, front-right)
  Processing format: 0x1 (pcm16)
  Processing frame size: 4 bytes
  Pending config events: none
  Output device: 0 (NONE)
  Input device: 0 (NONE)
  Audio source: 0 (default)
  Normal frame count: 1920
  Last write occurred (msecs): 1949365
  Total writes: 215725
  Delayed writes: 0
  Blocked in write: no
  Suspend count: 0
  Sink buffer : 0xe8574000
  Mixer buffer: 0xe8576000
  Effect buffer: 0xe857b000
  Fast track availMask=0xfe
  Standby delay ns=3000000000
  AudioStreamOut: 0xe962d2d8 flags 0x8 (DEEP_BUFFER)
  Frames written: 414192000
  Suspended frames: 0
  Hal stream dump:
  Thread throttle time (msecs): 3470
  AudioMixer tracks: 0x00000000
  Master mono: off
  FastMixer not initialized
  Stream volumes in dB: 0:0, 1:-inf, 2:-inf, 3:-35, 4:-13, 5:-inf, 6:0, 7:-6, 8:-inf, 9:0, 10:-35, 11:0, 12:0
  Normal mixer raw underrun counters: partial=0 empty=0
  0 Tracks
  0 Effect Chains

In playback state:


Output thread 0xe7d836c0 type 0 (MIXER):
  Thread name: AudioOut_1D
  I/O handle: 29
  TID: 1090
  Standby: no
  Sample rate: 48000 Hz
  HAL frame count: 1920
  HAL format: 0x1 (pcm16)
  HAL buffer size: 7680 bytes
  Channel count: 2
  Channel mask: 0x00000003 (front-left, front-right)
  Processing format: 0x1 (pcm16)
  Processing frame size: 4 bytes
  Pending config events: none
  Output device: 0x8 (WIRED_HEADPHONE)
  Input device: 0 (NONE)
  Audio source: 0 (default)
  Normal frame count: 1920
  Last write occurred (msecs): 20
  Total writes: 215830
  Delayed writes: 0
  Blocked in write: yes
  Suspend count: 0
  Sink buffer : 0xe8574000
  Mixer buffer: 0xe8576000
  Effect buffer: 0xe857b000
  Fast track availMask=0xfe
  Standby delay ns=3000000000
  AudioStreamOut: 0xe962d2d8 flags 0x8 (DEEP_BUFFER)
  Frames written: 414393600
  Suspended frames: 0
  Hal stream dump:
  Thread throttle time (msecs): 3470
  AudioMixer tracks: 0x00000001
  Master mono: off
  FastMixer not initialized
  Stream volumes in dB: 0:-24, 1:-inf, 2:-inf, 3:-3, 4:-13, 5:-inf, 6:0, 7:-24, 8:-inf, 9:-96, 10:-3, 11:0, 12:0
  Normal mixer raw underrun counters: partial=0 empty=0
  1 Tracks of which 1 are active
    Name Active Client Type      Fmt Chn mask Session fCount S F SRate  L dB  R dB    Server Main buf  Aux Buf Flags UndFrmCnt
       0    yes  25927    3 00000001 00000003    3761  15376 A 3 48000     0     0  0002F580 0xe8574000 0x0 0x001     17298 
  0 Effect Chains

The most notable changes:

1. Standby: yes -> no

2. Output device: NONE -> WIRED_HEADPHONE (I did run the test with headphones plugged in)

3. A new section: "1 Tracks of which 1 are active..."

However, "Output thread 0xe7d836c0" has not changed (that is the this pointer of the PlaybackThread, printed by the dump function).

"I/O handle: 29" has not changed either. Time to switch into trace-back mode:


//AudioFlinger::ThreadBase::dumpBase prints:
dprintf(fd, "  I/O handle: %d\n", mId);
//AudioFlinger::ThreadBase::ThreadBase assigns it
mId(id),
//Right, the base class constructor
//AudioFlinger::PlaybackThread::PlaybackThread
:   ThreadBase(audioFlinger, id
//Keep going, to the PlaybackThread subclass constructor
//AudioFlinger::MixerThread::MixerThread
AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
        audio_io_handle_t id, audio_devices_t device, bool systemReady, type_t type)
    :   PlaybackThread(audioFlinger, output, id, device, type, systemReady),

That's it: the third parameter passed in when the MixerThread is constructed. Now look back at the openOutput_l method analyzed earlier:


// Create the PlaybackThread
thread = new (PlaybackThread subclass)((this, outputStream, *output, devices, mSystemReady))

It's this one, right here:

*output

The final source is also nearby:


//AudioFlinger::openOutput_l
*output = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);

It is generated by this nextUniqueId function! Looking back, a brief summary: generate the output number, create a PlaybackThread (subclass), and finally add the pair to AudioFlinger's mPlaybackThreads array.
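A minimal sketch of that bookkeeping, for intuition (AudioFlinger really uses a KeyedVector of sp<> pointers; the names below are stand-ins):

#include <cstdint>
#include <map>
#include <memory>

using audio_io_handle_t = int32_t;
struct PlaybackThreadStub { audio_io_handle_t id; };

class AudioFlingerSketch {
    std::map<audio_io_handle_t, std::shared_ptr<PlaybackThreadStub>> mPlaybackThreads;
    audio_io_handle_t mNextUniqueId = 1;
public:
    audio_io_handle_t openOutput() {
        audio_io_handle_t output = mNextUniqueId++;           // nextUniqueId()
        auto thread = std::make_shared<PlaybackThreadStub>();
        thread->id = output;                                  // becomes mId
        mPlaybackThreads.emplace(output, thread);             // mPlaybackThreads.add
        return output;
    }
    // What checkPlaybackThread_l effectively does
    std::shared_ptr<PlaybackThreadStub> checkPlaybackThread(audio_io_handle_t output) {
        auto it = mPlaybackThreads.find(output);
        return it == mPlaybackThreads.end() ? nullptr : it->second;
    }
};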

OK, back to the topic at hand. After the output has been selected, checkPlaybackThread_l retrieves the PlaybackThread that was created for that output earlier.

Next:


//AudioFlinger::createTrack
track = thread->createTrack_l(client, streamType, sampleRate, format,
                channelMask, frameCount, sharedBuffer, lSessionId, flags, tid,
                clientUid, &lStatus, portId);

This creates an AudioFlinger::Track object and returns it to the AudioTrack as the server's representative on the client side (a messenger? an undercover agent?). I won't go into detail here, or we'd drift off topic.

Next, let's talk about what happens when playback actually starts (sorry, that transition was a bit forced).

The third step: start playing

How does the Android system get from AudioTrack::start to AudioFlinger's Track::start? Let's skip that; after all, today's topic is device selection. On start, PlaybackThread::addTrack_l is called to wake up the sleeping PlaybackThread, which then calls the function that matters to us:


status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
{
    status = AudioSystem::startOutput(mId, track->streamType(),
                                              track->sessionId());
  ...
}

We discussed mId before: it is the PlaybackThread's serial number, its identifier, and it is the output!

Next we move on to our lovely startOutput function

status_t AudioPolicyManager::startOutput(audio_io_handle_t output,
                                             audio_stream_type_t stream,
                                             audio_session_t session)
{
    // Use the output id to find the outputDesc
    ssize_t index = mOutputs.indexOfKey(output);
    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
    //
    if (outputDesc->mPolicyMix != NULL) {
      ...
    } else if (mOutputRoutes.hasRouteChanged(session)) {
        // Select a new device
        newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
        checkStrategyRoute(getStrategy(stream), output);
    }
    ...
    status_t status = startSource(outputDesc, stream, newDevice, address, &delayMs);
    ...
}

What determines whether startSource is given a newDevice?

It is mOutputRoutes.hasRouteChanged(session):


bool SessionRouteMap::hasRouteChanged(audio_session_t session)
{
    if (indexOfKey(session) >= 0) {
        if (valueFor(session)->mChanged) {
            valueFor(session)->mChanged = false;
            return true;
        }
    }
    return false;
}

Two conditions: (a) the session exists in mOutputRoutes; (b) valueFor(session)->mChanged is true.

status_t AudioPolicyManager::getOutputForAttr(...){
  ...
        mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
  ...
}

void SessionRouteMap::addRoute:


void SessionRouteMap::addRoute(audio_session_t session,
                               audio_stream_type_t streamType,
                               audio_source_t source,
                               const sp<DeviceDescriptor>& descriptor,
                               uid_t uid)
{
    ...
    // Did it exist before?
    sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0;
    // The route already existed
    if (route != 0) {
        // The associated descriptor has changed
        if (((route->mDeviceDescriptor == 0) && (descriptor != 0)) ||
                ((route->mDeviceDescriptor != 0) &&
                 ((descriptor == 0) || (!route->mDeviceDescriptor->equals(descriptor))))) {
            route->mChanged = true;
        }
        route->mRefCount++;
        route->mDeviceDescriptor = descriptor;
    } else {
        // The route did not exist before
        route = new SessionRoute(session, streamType, source, descriptor, uid);
        route->mRefCount++;
        add(session, route);
        if (descriptor != 0) {
            route->mChanged = true;
        }
    }
}

The session here is effectively the route index that AudioPolicy exposes to the client side, much like output earlier. My current understanding is that it is used to decide whether the audio route needs to be re-planned.

Fighting back tears, let's go back and trace where the session parameter comes from:

(Here we use the use of AudioTrack in MediaPlayerService.cpp as a typical example)


status_t MediaPlayerService::AudioOutput::open()
{
    ...
        t = new AudioTrack(
                    ...
                    mSessionId,
                    ...);
    ...
}
// After some digging:
status_t MediaPlayer::setAudioSessionId(audio_session_t sessionId)
// or
mAudioSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);

So the session either comes from the user's call to MediaPlayer::setAudioSessionId, or is generated internally with newAudioUniqueId (new-ing up a unique ID).

AudioManager has something similar:


//AudioManager.java
public int generateAudioSessionId() {
        int session = AudioSystem.newAudioSessionId();
        if (session > 0) {
            return session;
        } else {
            Log.e(TAG, "Failure to generate a new audio session ID");
            return ERROR;
        }
    }
public static final int AUDIO_SESSION_ID_GENERATE = AudioSystem.AUDIO_SESSION_ALLOCATE;

So that's that. Let's recap.

getOutputForAttr was called earlier, so the first condition is met.

The second condition: either the route is newly added, or the route already existed but its descriptor has changed; in either case it is marked mChanged.

(mChanged is flipped back to false inside hasRouteChanged.)

So only when a route is used for the first time, or when an existing route's descriptor has changed, does startOutput need getNewOutputDevice! In all other cases, the route's reference count (mRefCount) is simply incremented.

Let's first look at the case where a new device has to be selected!

getNewOutputDevice

Check AudioPatch first


// outputDesc's patchHandle is indeed an index into mAudioPatches
ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
    if (index >= 0) {
        // Take the AudioPatch corresponding to mAudioPatches
        // (an audioPatch is simply a struct with a source and a sink)
        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
        // mUidCached = getuid(). Didn't find where it comes from
        if (patchDesc->mUid != mUidCached) {
            ALOGV("getNewOutputDevice() device %08x forced by patch %d",
                  outputDesc->device(), outputDesc->getPatchHandle());
            return outputDesc->device();
        }
    }

My understanding is that this is a special mechanism that lets the app layer configure an audio source and an audio sink directly.
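For intuition, here is a small sketch of what such a patch looks like: one struct linking source ports to sink ports. It assumes the struct audio_patch definitions from AOSP's system/audio.h; the field usage mirrors the setOutputDevice code we will read shortly.

#include <cstring>
#include <system/audio.h>

void buildSpeakerPatchSketch(struct audio_patch* patch) {
    memset(patch, 0, sizeof(*patch));
    // Source: the output mix we are playing into
    patch->num_sources = 1;
    patch->sources[0].role = AUDIO_PORT_ROLE_SOURCE;
    patch->sources[0].type = AUDIO_PORT_TYPE_MIX;
    // Sink: the physical speaker device
    patch->num_sinks = 1;
    patch->sinks[0].role = AUDIO_PORT_ROLE_SINK;
    patch->sinks[0].type = AUDIO_PORT_TYPE_DEVICE;
    patch->sinks[0].ext.device.type = AUDIO_DEVICE_OUT_SPEAKER;
}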

If there is no AudioPatch, select newDevice according to the following priority


Whichever strategy is active, use that strategy's device (by calling getDeviceForStrategy), as sketched after the list.
Priority:
1. STRATEGY_ENFORCED_AUDIBLE && AUDIO_POLICY_FORCE_SYSTEM_ENFORCED
2. STRATEGY_PHONE || isInCall()
3. STRATEGY_SONIFICATION
4. STRATEGY_ENFORCED_AUDIBLE
5. STRATEGY_ACCESSIBILITY
6. STRATEGY_SONIFICATION_RESPECTFUL
7. STRATEGY_MEDIA
8. STRATEGY_DTMF
9. STRATEGY_TRANSMITTED_THROUGH_SPEAKER
10. STRATEGY_REROUTING
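A hedged sketch of that cascade (my simplification of getNewOutputDevice, with the strategy checks injected as callables):

#include <cstdint>
#include <functional>

enum routing_strategy_sketch {
    STRATEGY_ENFORCED_AUDIBLE, STRATEGY_PHONE, STRATEGY_SONIFICATION,
    STRATEGY_ACCESSIBILITY, STRATEGY_SONIFICATION_RESPECTFUL,
    STRATEGY_MEDIA, STRATEGY_DTMF, STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
    STRATEGY_REROUTING,
};

uint32_t getNewOutputDeviceSketch(
        const std::function<bool(routing_strategy_sketch)>& isStrategyActive,
        const std::function<uint32_t(routing_strategy_sketch)>& getDeviceForStrategy) {
    const routing_strategy_sketch order[] = {
        STRATEGY_ENFORCED_AUDIBLE, STRATEGY_PHONE, STRATEGY_SONIFICATION,
        STRATEGY_ACCESSIBILITY, STRATEGY_SONIFICATION_RESPECTFUL,
        STRATEGY_MEDIA, STRATEGY_DTMF, STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
        STRATEGY_REROUTING,
    };
    for (routing_strategy_sketch s : order) {
        if (isStrategyActive(s)) return getDeviceForStrategy(s);  // first active wins
    }
    return 0;  // AUDIO_DEVICE_NONE
}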

checkStrategyRoute

Parameter one: getStrategy(stream)

Parameter two: output

// Find the default device of the strategy corresponding to the current stream
getDeviceForStrategy
// Find all outputs currently routed to this device
// (doesn't the pass we already did in getOutputForAttr count?)
outputs = getOutputsForDevice(device, mOutputs);
for(size_t j = 0; j < mOutputs.size(); j++) {
    // Skip the output passed in as a parameter
    // (this check excludes the output that startOutput was called with)
    // If the current strategy is active:
    isStrategyActive(outputDesc, (routing_strategy)strategy))
    {
        // If the default device for this strategy is on another output mix,
        // invalidate all tracks in this strategy to force re connection.
        // Otherwise select new device on the output mix.
        if (outputs.indexOf(mOutputs.keyAt(j)) < 0) {
            for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
                if (getStrategy((audio_stream_type_t)stream) == strategy) {
                    mpClientInterface->invalidateStream((audio_stream_type_t)stream);
                }
            }
        } else {
            audio_devices_t newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
            setOutputDevice(outputDesc, newDevice, false);
        }
    }
}

Good grief, this is complicated. If you don't fully follow it, don't worry about it.

startSource


// cannot start playback of STREAM_TTS if any other output is being used
// (not sure why this is done) This short leading section uses
// handleEventForBeacon to handle muting of TTS:
// STARTING_BEACON when playing the beacon (TTS), STARTING_OUTPUT when
// playing anything else, which mutes TTS.
(code omitted)
// If the output is inactive and no audio patch exists, force a device change
bool force = !outputDesc->isActive() &&
    (outputDesc->getPatchHandle() == AUDIO_PATCH_HANDLE_NONE);
// Increment the usage count of this stream on the requested output:
// this usage count is what startOutput/stopOutput use to correctly drive
// hardware output routing, for duplicated outputs as well as hardware outputs.
outputDesc->changeRefCount(stream, 1);
...
if (outputDesc->mRefCount[stream] == 1 || device != AUDIO_DEVICE_NONE) {
    // device is still AUDIO_DEVICE_NONE: pick a device again
    if (device == AUDIO_DEVICE_NONE) {
         device = getNewOutputDevice(outputDesc, false /*fromCache*/);
    }
    // Handle waitMs (delayed output?)
    (code omitted)
    // setOutputDevice returns muteWaitMs
    uint32_t muteWaitMs = setOutputDevice(outputDesc, device, force, 0, NULL, address);
    // Handle sonification that cuts in during a call
    if (isInCall()) {
        handleIncallSonification(stream, true, false);
    }
    // Apply the volume rules
    checkAndSetVolume(stream,
                          mVolumeCurves->getVolumeIndex(stream, outputDesc->device()),
                          outputDesc,
                          outputDesc->device());
    ...
    // Set delayMs
    if (waitMs > muteWaitMs) {
            *delayMs = waitMs - muteWaitMs;
    }
    return NO_ERROR;
}

Now let's analyze the setOutputDevice mentioned above in detail.

uint32_t AudioPolicyManager::setOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
                                             audio_devices_t device,
                                             bool force,
                                             int delayMs,
                                             audio_patch_handle_t *patchHandle,
                                             const char* address)
{
    AudioParameter param;
    uint32_t muteWaitMs;
    // Duplicated output: run setOutputDevice once each for output1 and output2
    if (outputDesc->isDuplicated()) {
        muteWaitMs = setOutputDevice(outputDesc->subOutput1(), device, force, delayMs);
        muteWaitMs += setOutputDevice(outputDesc->subOutput2(), device, force, delayMs);
        return muteWaitMs;
    }
    ...
    if (device == AUDIO_DEVICE_NONE) {
        resetOutputDevice(outputDesc, delayMs, NULL);
    } else {
        DeviceVector deviceList;
        if ((address == NULL) || (strlen(address) == 0)) {
            // mAvailableOutputDevices is already prepared when the APM is constructed
            // setDeviceConnectionStateInt also adds newly connected devices
            deviceList = mAvailableOutputDevices.getDevicesFromType(device);
        } else {
            deviceList = mAvailableOutputDevices.getDevicesFromTypeAddr(device, String8(address));
        }
        if (!deviceList.isEmpty()) {
            struct audio_patch patch;
            outputDesc->toAudioPortConfig(&patch.sources[0]);
            patch.num_sources = 1;
            patch.num_sinks = 0;
            for (size_t i = 0; i < deviceList.size() && i < AUDIO_PATCH_PORTS_MAX; i++)             {
                deviceList.itemAt(i)->toAudioPortConfig(&patch.sinks[i]);
                patch.num_sinks++;
            }
            // Look up the patch's index in mAudioPatches
            ssize_t index;
            if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
                index = mAudioPatches.indexOfKey(*patchHandle);
            } else {
                index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
            }
            // Handle afPatchHandle
            sp< AudioPatch> patchDesc;
            audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
            if (index >= 0) {
                patchDesc = mAudioPatches.valueAt(index);
                afPatchHandle = patchDesc->mAfPatchHandle;
            }
            // An enormous function; reading it makes my head hurt.
            status_t status = mpClientInterface->createAudioPatch(&patch,
                                                                   &afPatchHandle,
                                                                   delayMs);
          if (status == NO_ERROR) {
                if (index < 0) {
                    patchDesc = new AudioPatch(&patch, mUidCached);
                    addAudioPatch(patchDesc->mHandle, patchDesc);
                } else {
                    patchDesc->mPatch = patch;
                }
                patchDesc->mAfPatchHandle = afPatchHandle;
                if (patchHandle) {
                    *patchHandle = patchDesc->mHandle;
                }
                outputDesc->setPatchHandle(patchDesc->mHandle);
                nextAudioPortGeneration();
                mpClientInterface->onAudioPatchListUpdate();
          }
        }
    }
}

My analysis of Audio Patch isn't that deep; the article below covers it well:

Some analysis of Audio Patch and Patch Panel on Android 5.0

 

After a chain of calls, the code arrives here:

status_t AudioFlinger::PlaybackThread::createAudioPatch_l(const struct audio_patch *patch,audio_patch_handle_t *handle) {
  ...
        if (mOutput->audioHwDev->supportsAudioPatches()) {
        sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
        status = hwDevice->createAudioPatch(patch->num_sources,
                                            patch->sources,
                                            patch->num_sinks,
                                            patch->sinks,
                                            handle);
    } else {
        char *address;
        if (strcmp(patch->sinks[0].ext.device.address, "") != 0) {
            //FIXME: we only support address on first sink with HAL version < 3.0
            address = audio_device_address_to_parameter(
                                                        patch->sinks[0].ext.device.type,
                                                        patch->sinks[0].ext.device.address);
        } else {
            address = (char *)calloc(1, 1);
        }
        AudioParameter param = AudioParameter(String8(address));
        free(address);
        param.addInt(String8(AudioParameter::keyRouting), (int)type);
        status = mOutput->stream->setParameters(param.toString());
        *handle = AUDIO_PATCH_HANDLE_NONE;
    }
  ...
}

If supportsAudioPatches is true, it goes on to createAudioPatch, passing through DeviceHalHidl::createAudioPatch under libaudiohal and then Device.cpp under hardware/interfaces/audio/2.0/default, before finally entering the create_audio_patch function implemented in audio_hw.c under hardware/qcom/audio/hal (assuming a Qualcomm platform).

If the above process is not clear, you can read my article:

https://blog.csdn.net/bberdong/article/details/79472208

However, I couldn't find create_audio_patch in the audio_hw.c implementation in my source tree. Is it not supported?


status_t DeviceHalHidl::supportsAudioPatches(bool *supportsPatches) {
    if (mDevice == 0) return NO_INIT;
    return processReturn("supportsAudioPatches", mDevice->supportsAudioPatches(), supportsPatches);
}
Return<bool> Device::supportsAudioPatches() {
    return version() >= AUDIO_DEVICE_API_VERSION_3_0;
}
//Device.h
uint32_t version() const { return mDevice->common.version; }

Then I checked: qcom/audio/hal/audio_hw.c declares AUDIO_DEVICE_API_VERSION_2_0. So it really is not supported.

Great, found an excuse not to continue with create_audio_patch!

So let's look at setParameters instead.

The call first reaches StreamHalHidl::setParameters, then HIDL's Stream::setParameters and StreamOut::setParameters, then mStreamCommon->setParameters, and finally Stream::halSetParameters:

int Stream::halSetParameters(const char* keysAndValues) {
    return mStream->set_parameters(mStream, keysAndValues);
}

mStream comes from the constructor

Stream::Stream(audio_stream_t* stream)
        : mStream(stream) {
}

and the audio_stream_t comes from:


StreamOut::StreamOut(const sp<Device>& device, audio_stream_out_t* stream)
    : ...
      mStreamCommon(new Stream(&stream->common)),
      ...

that is, when StreamOut is constructed, which happens here:

Return<void> Device::openOutputStream(int32_t ioHandle,
                                      const DeviceAddress& device,
                                      const AudioConfig& config,
                                      AudioOutputFlag flags,
                                      openOutputStream_cb _hidl_cb) {
    audio_config_t halConfig;
    HidlUtils::audioConfigToHal(config, &halConfig);
    audio_stream_out_t* halStream;
    ALOGV(
        "open_output_stream handle: %d devices: %x flags: %#x "
        "srate: %d format %#x channels %x address %s",
        ioHandle, static_cast<audio_devices_t>(device.device),
        static_cast<audio_output_flags_t>(flags), halConfig.sample_rate,
        halConfig.format, halConfig.channel_mask,
        deviceAddressToHal(device).c_str());
    int status = mDevice->open_output_stream(
        mDevice, ioHandle, static_cast<audio_devices_t>(device.device),
        static_cast<audio_output_flags_t>(flags), &halConfig, &halStream,
        deviceAddressToHal(device).c_str());
    ALOGV("open_output_stream status %d stream %p", status, halStream);
    sp<IStreamOut> streamOut;
    if (status == OK) {
        streamOut = new StreamOut(this, halStream);
    }
    AudioConfig suggestedConfig;
    HidlUtils::audioConfigFromHal(halConfig, &suggestedConfig);
    _hidl_cb(analyzeStatus("open_output_stream", status), streamOut,
             suggestedConfig);
    return Void();
}

In other words, the relationship to the out->stream.common object returned by adev_open_output_stream in audio_hw.c is not an assignment but a direct correspondence (the HIDL Stream is a stub wrapping it).

So, given this assignment in the HAL:

//qcom/audio/hal/audio_hw.c
out->stream.common.set_parameters = out_set_parameters;

the call

mStream->set_parameters(mStream, keysAndValues);

points straight at the out_set_parameters function of audio_hw.c.
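Here is a tiny self-contained sketch of that function-pointer dispatch, with simplified stand-in types (the real ones live in hardware/audio.h):

#include <cstdio>

struct stream_sketch {
    int (*set_parameters)(stream_sketch* s, const char* kvpairs);
};

// Plays the role of out_set_parameters in audio_hw.c
static int out_set_parameters_sketch(stream_sketch*, const char* kvpairs) {
    std::printf("HAL got: %s\n", kvpairs);
    return 0;
}

int main() {
    // adev_open_output_stream wires the function pointer up like this
    stream_sketch stream { out_set_parameters_sketch };
    // What Stream::halSetParameters effectively does:
    return stream.set_parameters(&stream, "routing=2");
}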

What a struggle! So many twists and turns.

Parameter 1: confusingly, it is also named mStream! The object it corresponds to here is the StreamOut side.

Parameter 2: keysAndValues, which is simply a set of key/value pairs.

I checked; it looks roughly like this:


"routing,整数((int)patch->sources[0].ext.device.type)"
"input_source,整数((int)patch->sinks[0].ext.mix.usecase.source)""routing,整数((int)patch->sources[0].ext.device.type)"
"input_source,整数((int)patch->sinks[0].ext.mix.usecase.source)"

Then go to audio_hw.c to analyze kvpairs.

//out_set_parameters
if (new_dev != AUDIO_DEVICE_NONE) {
    // Notify the adm (the audio data manager on the aDSP side) before the
    // actual routing change, to prevent glitches (pops)
    adev->adm_on_routing_change
    ...
    select_devices(adev, out->usecase);
    ...
}

Then came the select_devices step:

//select_devices
usecase = get_usecase_from_list
...
enable_audio_route(adev, usecase);

Then


//enable_audio_route
...
platform_add_backend_name(adev->platform, mixer_path, snd_device);
audio_route_apply_and_update_path(adev->audio_route, mixer_path);
...

Next we arrive at system/media/audio_route/audio_route.c.

Now let's take a serious look at this step:


int audio_route_apply_and_update_path(struct audio_route *ar, const char *name)
{
    if (audio_route_apply_path(ar, name) < 0) {
        return -1;
    }
    return audio_route_update_path(ar, name, false /*reverse*/);
}

That, however, is a story for the sequel.

 

Origin: blog.csdn.net/bberdong/article/details/80484568