Android Framework Audio Subsystem (16) AudioRecord Case

This series of articles Master link: Thematic sub-directory Android Framework Class Audio Subsystem


Summary and description of key points in this chapter:

This chapter focuses on the recording portion (upper left) of the mind map above. It explains the basics of audio recording and walks through Android's recording flow using a native test program.


1 Basic knowledge of recording

@ 1 Relationship between PCM and WAV

PCM audio data is raw audio data, which a typical player cannot play directly. A header must be added to it to describe the stream — how many channels the sound has, what the sampling rate is, and so on. Converting the PCM audio data to WAV format lets ordinary players play it.

@ 2 3 key parameters of recording

  • Sampling rate: the number of times the sound wave is sampled per second. Common sampling rates are 8000, 11025, 44100. . .
  • Sampling accuracy: Currently fixed to 16bit on Android system.
  • Number of channels: two channels (Stereo stereo, each sample point records the value of the left and right channels) or mono (Mono).

2 Recording test procedure

There are three key files involved:

  • AudioRecordTest.cpp: recording program, finally output pcm audio format data.
  • pcm2wav.cpp: add header information in pcm format and convert to WAV format.
  • Android.mk: Compile configuration.

@ 1 AudioRecordTest.cpp, used to record pcm data, but no header information is added here. code show as below:

#include <utils/Log.h>
#include <media/AudioRecord.h>
#include <stdlib.h>

using namespace android;
 
//==============================================
//  Audio Record Defination
//==============================================
#ifdef LOG_TAG
#undef LOG_TAG
#endif

#define LOG_TAG "AudioRecordTest"
 
// Handle for a dedicated recording thread; unused in this test, where the
// recording loop is invoked directly from main().
static pthread_t    g_AudioRecordThread;
static pthread_t *  g_AudioRecordThreadPtr = NULL;
 
// Set to true by another thread to make the recording loop exit.
volatile bool       g_bQuitAudioRecordThread = false;
// Running sample-time counter, reset at the start of each recording.
volatile int        g_iInSampleTime = 0;
// Frames per notification period; the 8000/10 default assumes 8 kHz and is
// recomputed from the actual sample rate inside AudioRecordThread.
int                 g_iNotificationPeriodInFrames = 8000/10; 
// g_iNotificationPeriodInFrames should be changed when the sample rate changes.

// Records raw PCM data from the microphone into the file named by fileName.
// Loops until g_bQuitAudioRecordThread is set to true (by another thread) or
// the record stream reports an error.
//
// sample_rate: capture rate in Hz (e.g. 8000, 44100).
// channels:    1 = mono, 2 = stereo.
// fileName:    path of the output raw PCM file (passed as void* for pthread
//              start-routine compatibility; actually a char*).
// Returns NULL in all cases; failures are reported via ALOGE.
static void *   AudioRecordThread(int sample_rate, int channels, void *fileName)
{
    void *                  inBuffer            = NULL;
    audio_source_t          inputSource         = AUDIO_SOURCE_MIC;
    audio_format_t          audioFormat         = AUDIO_FORMAT_PCM_16_BIT;
    audio_channel_mask_t    channelConfig       = AUDIO_CHANNEL_IN_MONO;
    int                     bufferSizeInBytes;
    int                     sampleRateInHz      = sample_rate;
    android::AudioRecord *  pAudioRecord        = NULL;
    FILE *                  g_pAudioRecordFile  = NULL;
    char *                  strAudioFile        = (char *)fileName;
    android::status_t       status              = android::NO_ERROR;

    int     iNbChannels     = channels; // 1 channel for mono, 2 channels for stereo
    int     iBytesPerSample = 2;        // 16-bit PCM => 2 bytes per sample
    int     frameSize       = 0;        // frameSize = iNbChannels * iBytesPerSample
    size_t  minFrameCount   = 0;        // queried from the AudioRecord class
    int     iWriteDataCount = 0;        // total bytes written to the output file

    // Log the thread id for debugging. pthread_t is an opaque type, so cast
    // to long instead of feeding it to a %d conversion (UB on LP64 targets).
    ALOGD("%s  Thread ID  = %ld  \n", __FUNCTION__, (long)pthread_self());
    g_iInSampleTime = 0;

    g_pAudioRecordFile = fopen(strAudioFile, "wb+");
    if (NULL == g_pAudioRecordFile)
    {
        // Fail early: writing through a NULL FILE* later would crash.
        ALOGE("%s  open output file %s failed \n", __FUNCTION__, strAudioFile);
        goto exit;
    }

    if (iNbChannels == 2) {
        channelConfig = AUDIO_CHANNEL_IN_STEREO;
    }
    printf("sample_rate = %d, channels = %d, iNbChannels = %d, channelConfig = 0x%x\n", sample_rate, channels, iNbChannels, channelConfig);

    frameSize = iNbChannels * iBytesPerSample;

    // Ask the framework for the minimum buffer size (in frames) the record
    // stream requires for this rate/format/channel combination.
    status = android::AudioRecord::getMinFrameCount(
        &minFrameCount, sampleRateInHz, audioFormat, channelConfig);
    if (status != android::NO_ERROR)
    {
        ALOGE("%s  AudioRecord.getMinFrameCount fail \n", __FUNCTION__);
        goto exit;
    }

    ALOGE("sampleRateInHz = %d minFrameCount = %zu iNbChannels = %d channelConfig = 0x%x frameSize = %d ",
        sampleRateInHz, minFrameCount, iNbChannels, channelConfig, frameSize);

    // Allocate the intermediate read buffer.
    bufferSizeInBytes = minFrameCount * frameSize;
    inBuffer = malloc(bufferSizeInBytes);
    if (inBuffer == NULL)
    {
        ALOGE("%s  alloc mem failed \n", __FUNCTION__);
        goto exit;
    }

    // Notification period: one tenth of a second worth of frames.
    g_iNotificationPeriodInFrames = sampleRateInHz / 10;

    // Create the native AudioRecord object.
    pAudioRecord = new android::AudioRecord();
    if (NULL == pAudioRecord)
    {
        ALOGE(" create native AudioRecord failed! ");
        goto exit;
    }

    // Configure the record stream. No callback is installed: this test polls
    // the stream with blocking read() calls in the loop below.
    pAudioRecord->set(inputSource,
                      sampleRateInHz,
                      audioFormat,
                      channelConfig,
                      0,
                      NULL, //AudioRecordCallback,
                      NULL,
                      0,
                      true,
                      0);

    if (pAudioRecord->initCheck() != android::NO_ERROR)
    {
        ALOGE("AudioTrack initCheck error!");
        goto exit;
    }

    // Start capturing.
    if (pAudioRecord->start() != android::NO_ERROR)
    {
        ALOGE("AudioTrack start error!");
        goto exit;
    }

    while (!g_bQuitAudioRecordThread)
    {
        // Blocking read of captured audio data from the framework.
        int readLen = pAudioRecord->read(inBuffer, bufferSizeInBytes);
        int writeResult = -1;

        if (readLen > 0)
        {
            iWriteDataCount += readLen;
            if (NULL != g_pAudioRecordFile)
            {
                // Append the captured PCM data to the output file.
                writeResult = fwrite(inBuffer, 1, readLen, g_pAudioRecordFile);
                if (writeResult < readLen)
                {
                    ALOGE("Write Audio Record Stream error");
                }
            }
        }
        else if (readLen < 0)
        {
            // A negative value is a framework error (e.g. DEAD_OBJECT).
            // Looping on it would just busy-spin, so stop recording.
            ALOGE("pAudioRecord->read error, readLen = %d", readLen);
            break;
        }
        else
        {
            ALOGE("pAudioRecord->read  readLen = 0");
        }
    }

exit:
    if (NULL != g_pAudioRecordFile)
    {
        fflush(g_pAudioRecordFile);
        fclose(g_pAudioRecordFile);
        g_pAudioRecordFile = NULL;
    }

    if (pAudioRecord)
    {
        pAudioRecord->stop();
        // NOTE(review): pAudioRecord is intentionally not deleted here.
        // AudioRecord derives from RefBase and should really be held in an
        // android::sp<AudioRecord>; a raw delete can abort at runtime.
        // TODO: confirm against the platform version in use.
    }

    if (inBuffer)
    {
        free(inBuffer);
        inBuffer = NULL;
    }

    ALOGD("%s  Thread ID  = %ld  quit\n", __FUNCTION__, (long)pthread_self());
    return NULL;
}

// Entry point: parse <sample_rate> <channels> <out_file> from the command
// line and run the recording loop synchronously on the main thread.
int main(int argc, char **argv)
{
    const int kExpectedArgs = 4;
    if (kExpectedArgs != argc)
    {
        printf("Usage:\n");
        printf("%s <sample_rate> <channels> <out_file>\n", argv[0]);
        return -1;
    }

    int rate      = strtol(argv[1], NULL, 0);
    int nchannels = strtol(argv[2], NULL, 0);
    AudioRecordThread(rate, nchannels, argv[3]);
    return 0;
}

@ 2 pcm2wav.cpp

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* 参考https://blog.csdn.net/u010011236/article/details/53026127 */

/**
 * Convert PCM16LE raw data to WAVE format
 * @param pcmpath       Input PCM file.
 * @param channels      Channel number of PCM file.
 * @param sample_rate   Sample rate of PCM file.
 * @param wavepath      Output WAVE file.
 */
/**
 * Convert raw PCM16LE data to WAVE format by prepending a RIFF/WAVE header.
 *
 * @param pcmpath       Input PCM file path.
 * @param sample_rate   Sample rate of the PCM data in Hz.
 * @param channels      Number of channels in the PCM data (1 or 2).
 * @param wavepath      Output WAVE file path.
 * @return 0 on success, -1 if either file cannot be opened.
 */
int simplest_pcm16le_to_wave(const char *pcmpath, int sample_rate, int channels, const char *wavepath)
{
    // Header fields use fixed-width types: the WAV format requires exactly
    // 32-bit / 16-bit little-endian fields. The previous `unsigned long`
    // fields are 8 bytes on LP64 platforms, which corrupts the header layout.
    typedef struct WAVE_HEADER{
        char     fccID[4];       // "RIFF"
        uint32_t dwSize;         // filled in last: 36 + size of the PCM data
        char     fccType[4];     // "WAVE"
    }WAVE_HEADER;

    typedef struct WAVE_FMT{
        char     fccID[4];          // "fmt "
        uint32_t dwSize;            // byte size of the rest of this chunk: 16
        uint16_t wFormatTag;        // 1 = PCM
        uint16_t wChannels;         // 1 = mono, 2 = stereo
        uint32_t dwSamplesPerSec;   // sample rate
        uint32_t dwAvgBytesPerSec;  // dwSamplesPerSec * wChannels * uiBitsPerSample / 8
        uint16_t wBlockAlign;       // wChannels * uiBitsPerSample / 8
        uint16_t uiBitsPerSample;   // bits per sample point: 8 or 16
    }WAVE_FMT;

    typedef struct WAVE_DATA{
        char     fccID[4];       // "data"
        uint32_t dwSize;         // NumSamples * wChannels * uiBitsPerSample / 8
    }WAVE_DATA;

    int bits = 16;

    WAVE_HEADER pcmHEADER;
    WAVE_FMT    pcmFMT;
    WAVE_DATA   pcmDATA;

    unsigned short m_pcmData;
    FILE *fp, *fpout;

    fp = fopen(pcmpath, "rb");   // input is only read, no "+" needed
    if(fp == NULL)
    {
        printf("Open pcm file error.\n");
        return -1;
    }
    fpout = fopen(wavepath, "wb+");
    if(fpout == NULL)
    {
        printf("Create wav file error.\n");
        fclose(fp);              // don't leak the input handle on this path
        return -1;
    }

    /* WAVE_HEADER: its size field is known only at the end, so skip it. */
    memcpy(pcmHEADER.fccID, "RIFF", 4);
    memcpy(pcmHEADER.fccType, "WAVE", 4);
    fseek(fpout, sizeof(WAVE_HEADER), SEEK_CUR);

    /* WAVE_FMT */
    memcpy(pcmFMT.fccID, "fmt ", 4);
    pcmFMT.dwSize = 16;
    pcmFMT.wFormatTag = 1;       // PCM
    pcmFMT.wChannels = channels;
    pcmFMT.dwSamplesPerSec = sample_rate;
    pcmFMT.uiBitsPerSample = bits;
    /* == dwSamplesPerSec * wChannels * uiBitsPerSample / 8 */
    pcmFMT.dwAvgBytesPerSec = pcmFMT.dwSamplesPerSec * pcmFMT.wChannels * pcmFMT.uiBitsPerSample / 8;
    /* == wChannels * uiBitsPerSample / 8 */
    pcmFMT.wBlockAlign = pcmFMT.wChannels * pcmFMT.uiBitsPerSample / 8;

    fwrite(&pcmFMT, sizeof(WAVE_FMT), 1, fpout);

    /* WAVE_DATA: the size is also patched in after copying the samples. */
    memcpy(pcmDATA.fccID, "data", 4);
    pcmDATA.dwSize = 0;
    fseek(fpout, sizeof(WAVE_DATA), SEEK_CUR);

    /* Copy samples. Testing fread's return value (instead of feof()) avoids
       the classic off-by-one where the last read is processed twice. */
    while(fread(&m_pcmData, sizeof(unsigned short), 1, fp) == 1)
    {
        pcmDATA.dwSize += 2;
        fwrite(&m_pcmData, sizeof(unsigned short), 1, fpout);
    }

    /* RIFF chunk size = 4 ("WAVE") + (8 + 16) fmt chunk + (8 + data bytes) */
    pcmHEADER.dwSize = 36 + pcmDATA.dwSize;

    /* Go back and fill in the two deferred chunk headers. */
    rewind(fpout);
    fwrite(&pcmHEADER, sizeof(WAVE_HEADER), 1, fpout);
    fseek(fpout, sizeof(WAVE_FMT), SEEK_CUR);
    fwrite(&pcmDATA, sizeof(WAVE_DATA), 1, fpout);

    fclose(fp);
    fclose(fpout);

    return 0;
}

// Entry point: parse <input pcm> <sample_rate> <channels> <output wav>
// from the command line and run the conversion.
int main(int argc, char **argv)
{
    if (argc != 5)
    {
        printf("Usage:\n");
        printf("%s <input pcm file> <sample_rate> <channels>  <output wav file>\n", argv[0]);
        return -1;
    }

    const char *pcm_path = argv[1];
    const char *wav_path = argv[4];
    long rate = strtol(argv[2], NULL, 0);
    long nch  = strtol(argv[3], NULL, 0);
    simplest_pcm16le_to_wave(pcm_path, rate, nch, wav_path);

    return 0;
}

@ 3 Android.mk implementation

# Build configuration: two test executables built from this directory.
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)

# Module 1: the native recording test program.
LOCAL_SRC_FILES:= \
	AudioRecordTest.cpp

# libmedia provides the native android::AudioRecord class.
LOCAL_SHARED_LIBRARIES := \
	libcutils \
	libutils \
    libmedia

LOCAL_MODULE:= AudioRecordTest

LOCAL_MODULE_TAGS := tests

include $(BUILD_EXECUTABLE)

include $(CLEAR_VARS)

# Module 2: the PCM-to-WAV converter.
LOCAL_SRC_FILES:= \
	pcm2wav.cpp

LOCAL_SHARED_LIBRARIES := \
	libcutils \
	libutils \
    libmedia

LOCAL_MODULE:= pcm2wav

LOCAL_MODULE_TAGS := tests

include $(BUILD_EXECUTABLE)

3 Test procedure execution steps

@ 1 Start recording:

 ./AudioRecordTest 44100 2 my.pcm

@ 2 File format conversion (PCM to WAV):

./pcm2wav my.pcm 44100 2 my.wav

4 Encountered problems and analysis

@ 1 tinyplay can't play mono sound

If you need to play mono audio, you have to use a different player instead of tinyplay.

@ 2 Why do I use dual sound when recording, and only one ear has sound during playback? And when using monophonic recording, both ears have sound during playback?

  • The hardware and driver are dual-channel, but only one MIC is connected. So in the two-channel data the driver delivers during recording, one channel is always 0. If AudioRecordTest is told to record in stereo, one of the channels in the resulting PCM data is therefore always 0, and only one ear produces sound during playback.
  • If mono is specified during AudioRecordTest recording, the resulting PCM data contains a single channel that is a mix of the hardware left and right channels; this mixing is performed by the AudioFlinger system. When mono data is played back, AudioFlinger sends the same data to both the hardware Left DAC (left channel) and the hardware Right DAC (right channel), so both ears hear the sound.
     
Published 289 original articles · praised 47 · 30,000+ views

Guess you like

Origin blog.csdn.net/vviccc/article/details/105468232