Part One: Writing a sound recording program in C++
1. PCM audio data is raw audio data; a player cannot play it directly. You need to add a header to it describing how many channels there are, what the sample rate is, and so on.
Convert the PCM audio data into WAV format so that other players can play it.
2. Determine the three recording parameters
(1) Sampling rate: how many times per second the sound wave is sampled. Commonly used sampling rates are 8000, 11025, 22050, 32000 and 44100.
High version of Android should be able to support higher sampling rates.
(2) Bits per sample: the number of bits used to represent each sample value;
currently fixed at 16 bits on Android.
(3) the number of channels
Stereo: the values of both the left and right channels are recorded at each sample point.
Mono: a single channel is recorded.
3. The tinyplay tool can only play two-channel (stereo) audio data.
4. Test Procedure
(1) AudioRecordTest.cpp, for capturing PCM data
#include <utils/Log.h>
#include <media/AudioRecord.h>
#include <stdio.h>
#include <stdlib.h>

using namespace android;

//==============================================
// Audio Record Definition
//==============================================
#ifdef LOG_TAG
#undef LOG_TAG
#endif
#define LOG_TAG "AudioRecordTest"

static pthread_t g_AudioRecordThread;
static pthread_t *g_AudioRecordThreadPtr = NULL;
// Set to true by another thread to make the record loop below exit.
volatile bool g_bQuitAudioRecordThread = false;
volatile int g_iInSampleTime = 0;
// Notification period: one tenth of a second worth of frames.
// Updated below whenever the sample rate changes.
int g_iNotificationPeriodInFrames = 8000 / 10;

/*
 * Record raw PCM from the microphone into a file.
 *
 * sample_rate: sampling rate in Hz (e.g. 8000, 44100)
 * channels:    1 for mono, 2 for stereo
 * fileName:    output file path (char *); receives raw 16-bit PCM
 *
 * Loops until g_bQuitAudioRecordThread becomes true, then cleans up.
 * Always returns NULL.
 */
static void *AudioRecordThread(int sample_rate, int channels, void *fileName)
{
    void *inBuffer = NULL;
    audio_source_t inputSource = AUDIO_SOURCE_MIC;
    audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
    audio_channel_mask_t channelConfig = AUDIO_CHANNEL_IN_MONO;
    int bufferSizeInBytes;
    int sampleRateInHz = sample_rate;
    android::AudioRecord *pAudioRecord = NULL;
    FILE *g_pAudioRecordFile = NULL;
    char *strAudioFile = (char *)fileName;

    int iNbChannels = channels;   // 1 channel for mono, 2 channels for stereo
    int iBytesPerSample = 2;      // 16-bit PCM -> 2 bytes per sample
    int frameSize = 0;            // frameSize = iNbChannels * iBytesPerSample
    size_t minFrameCount = 0;     // queried from the AudioRecord object
    int iWriteDataCount = 0;      // how many bytes have been written to the file
    android::status_t status;

    // pthread_t is an opaque type; cast for logging only.
    // BUGFIX: was printed with %d, which is wrong for pthread_t.
    ALOGD("%s Thread ID = %ld \n", __FUNCTION__, (long)pthread_self());
    g_iInSampleTime = 0;

    g_pAudioRecordFile = fopen(strAudioFile, "wb+");
    if (NULL == g_pAudioRecordFile) {
        // BUGFIX: the open was previously unchecked; a bad path silently
        // discarded every sample that was recorded.
        ALOGE("%s open file %s failed \n", __FUNCTION__, strAudioFile);
        goto exit;
    }

    if (iNbChannels == 2) {
        channelConfig = AUDIO_CHANNEL_IN_STEREO;
    }
    printf("sample_rate = %d, channels = %d, iNbChannels = %d, channelConfig = 0x%x\n",
           sample_rate, channels, iNbChannels, channelConfig);

    frameSize = iNbChannels * iBytesPerSample;

    status = android::AudioRecord::getMinFrameCount(&minFrameCount,
                                                    sampleRateInHz,
                                                    audioFormat,
                                                    channelConfig);
    if (status != android::NO_ERROR) {
        ALOGE("%s AudioRecord.getMinFrameCount fail \n", __FUNCTION__);
        goto exit;
    }
    ALOGE("sampleRateInHz = %d minFrameCount = %d iNbChannels = %d channelConfig = 0x%x frameSize = %d ",
          sampleRateInHz, (int)minFrameCount, iNbChannels, channelConfig, frameSize);

    bufferSizeInBytes = minFrameCount * frameSize;
    inBuffer = malloc(bufferSizeInBytes);
    if (inBuffer == NULL) {
        ALOGE("%s alloc mem failed \n", __FUNCTION__);
        goto exit;
    }

    g_iNotificationPeriodInFrames = sampleRateInHz / 10;

    pAudioRecord = new android::AudioRecord();
    if (NULL == pAudioRecord) {
        ALOGE(" create native AudioRecord failed! ");
        goto exit;
    }

    pAudioRecord->set(inputSource,
                      sampleRateInHz,
                      audioFormat,
                      channelConfig,
                      0,
                      NULL,   // AudioRecordCallback
                      NULL,
                      0,
                      true,
                      0);

    if (pAudioRecord->initCheck() != android::NO_ERROR) {
        // BUGFIX: the message used to say "AudioTrack"; this is an AudioRecord.
        ALOGE("AudioRecord initCheck error!");
        goto exit;
    }

    if (pAudioRecord->start() != android::NO_ERROR) {
        ALOGE("AudioRecord start error!");
        goto exit;
    }

    while (!g_bQuitAudioRecordThread) {
        int readLen = pAudioRecord->read(inBuffer, bufferSizeInBytes);
        int writeResult = -1;
        if (readLen > 0) {
            iWriteDataCount += readLen;
            if (NULL != g_pAudioRecordFile) {
                writeResult = fwrite(inBuffer, 1, readLen, g_pAudioRecordFile);
                if (writeResult < readLen) {
                    ALOGE("Write Audio Record Stream error");
                }
            }
        } else {
            // BUGFIX: read() can return a negative error code, not only 0;
            // log the actual value instead of claiming it is 0.
            ALOGE("pAudioRecord->read failed, readLen = %d", readLen);
        }
    }

exit:
    if (NULL != g_pAudioRecordFile) {
        fflush(g_pAudioRecordFile);
        fclose(g_pAudioRecordFile);
        g_pAudioRecordFile = NULL;
    }
    if (pAudioRecord) {
        pAudioRecord->stop();
        // NOTE(review): pAudioRecord is intentionally left undeleted, as in the
        // original; on this Android version AudioRecord appears to be refcounted
        // (RefBase), where a raw delete is unsafe -- confirm before re-enabling.
        // delete pAudioRecord;
        // pAudioRecord = NULL;
    }
    if (inBuffer) {
        free(inBuffer);
        inBuffer = NULL;
    }
    ALOGD("%s Thread ID = %ld quit\n", __FUNCTION__, (long)pthread_self());
    return NULL;
}

/*
 * Usage: AudioRecordTest <sample_rate> <channels> <out_file>
 * Records raw 16-bit PCM from the mic into <out_file>.
 */
int main(int argc, char **argv)
{
    if (argc != 4) {
        printf("Usage:\n");
        printf("%s <sample_rate> <channels> <out_file>\n", argv[0]);
        return -1;
    }
    AudioRecordThread(strtol(argv[1], NULL, 0),
                      strtol(argv[2], NULL, 0),
                      argv[3]);
    return 0;
}
(2) pcm2wav.cpp, for converting PCM into WAV format
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>

/* Reference: https://blog.csdn.net/u010011236/article/details/53026127 */

/**
 * Convert PCM16LE raw data to WAVE format.
 *
 * @param pcmpath     Input PCM file (16-bit little-endian samples).
 * @param sample_rate Sample rate of the PCM data in Hz.
 * @param channels    Channel count of the PCM data (1 = mono, 2 = stereo).
 * @param wavepath    Output WAVE file.
 * @return 0 on success, -1 if either file cannot be opened.
 *
 * BUGFIX: the header structs previously used 'unsigned long', which is
 * 8 bytes on LP64 platforms and produced a corrupt (padded, oversized)
 * header.  WAV fields are 32/16-bit little-endian, so fixed-width types
 * are used, restoring the canonical 44-byte header.
 */
int simplest_pcm16le_to_wave(const char *pcmpath, int sample_rate, int channels, const char *wavepath)
{
    typedef struct WAVE_HEADER {
        char     fccID[4];    /* "RIFF" */
        uint32_t dwSize;      /* filled in last: 36 + size of the audio data */
        char     fccType[4];  /* "WAVE" */
    } WAVE_HEADER;
    typedef struct WAVE_FMT {
        char     fccID[4];         /* "fmt " */
        uint32_t dwSize;           /* byte size of the rest of this chunk: 16 */
        uint16_t wFormatTag;       /* 1 = PCM */
        uint16_t wChannels;        /* 1 = mono, 2 = stereo */
        uint32_t dwSamplesPerSec;  /* sample rate */
        uint32_t dwAvgBytesPerSec; /* == dwSamplesPerSec*wChannels*uiBitsPerSample/8 */
        uint16_t wBlockAlign;      /* == wChannels*uiBitsPerSample/8 */
        uint16_t uiBitsPerSample;  /* bits per sample: 8 or 16 */
    } WAVE_FMT;
    typedef struct WAVE_DATA {
        char     fccID[4];  /* "data" */
        uint32_t dwSize;    /* == NumSamples*wChannels*uiBitsPerSample/8 */
    } WAVE_DATA;

    const int bits = 16;
    WAVE_HEADER pcmHEADER;
    WAVE_FMT    pcmFMT;
    WAVE_DATA   pcmDATA;
    uint16_t    m_pcmData;
    FILE *fp, *fpout;

    /* BUGFIX: input is only read, so open "rb" rather than "rb+". */
    fp = fopen(pcmpath, "rb");
    if (fp == NULL) {
        printf("Open pcm file error.\n");
        return -1;
    }
    fpout = fopen(wavepath, "wb+");
    if (fpout == NULL) {
        printf("Create wav file error.\n");
        fclose(fp);  /* BUGFIX: fp leaked on this path */
        return -1;
    }

    /* WAVE_HEADER: the RIFF size is unknown until the data has been copied,
     * so reserve space for the header and come back to it at the end. */
    memcpy(pcmHEADER.fccID, "RIFF", 4);
    memcpy(pcmHEADER.fccType, "WAVE", 4);
    fseek(fpout, sizeof(WAVE_HEADER), SEEK_CUR);

    /* WAVE_FMT */
    memcpy(pcmFMT.fccID, "fmt ", 4);
    pcmFMT.dwSize = 16;
    pcmFMT.wFormatTag = 1;  /* PCM */
    pcmFMT.wChannels = (uint16_t)channels;
    pcmFMT.dwSamplesPerSec = (uint32_t)sample_rate;
    pcmFMT.uiBitsPerSample = (uint16_t)bits;
    /* == dwSamplesPerSec*wChannels*uiBitsPerSample/8 */
    pcmFMT.dwAvgBytesPerSec = pcmFMT.dwSamplesPerSec * pcmFMT.wChannels * pcmFMT.uiBitsPerSample / 8;
    /* == wChannels*uiBitsPerSample/8 */
    pcmFMT.wBlockAlign = (uint16_t)(pcmFMT.wChannels * pcmFMT.uiBitsPerSample / 8);
    fwrite(&pcmFMT, sizeof(WAVE_FMT), 1, fpout);

    /* WAVE_DATA: reserve the chunk header, then stream the samples across. */
    memcpy(pcmDATA.fccID, "data", 4);
    pcmDATA.dwSize = 0;
    fseek(fpout, sizeof(WAVE_DATA), SEEK_CUR);
    while (fread(&m_pcmData, sizeof(uint16_t), 1, fp) == 1) {
        pcmDATA.dwSize += 2;
        fwrite(&m_pcmData, sizeof(uint16_t), 1, fpout);
    }

    /* RIFF chunk size = 36 (rest of the header) + audio data bytes. */
    pcmHEADER.dwSize = 36 + pcmDATA.dwSize;
    rewind(fpout);
    fwrite(&pcmHEADER, sizeof(WAVE_HEADER), 1, fpout);
    fseek(fpout, sizeof(WAVE_FMT), SEEK_CUR);
    fwrite(&pcmDATA, sizeof(WAVE_DATA), 1, fpout);

    fclose(fp);
    fclose(fpout);
    return 0;
}

/*
 * Usage: pcm2wav <input pcm file> <sample_rate> <channels> <output wav file>
 */
int main(int argc, char **argv)
{
    if (argc != 5) {
        printf("Usage:\n");
        printf("%s <input pcm file> <sample_rate> <channels> <output wav file>\n", argv[0]);
        return -1;
    }
    simplest_pcm16le_to_wave(argv[1],
                             strtol(argv[2], NULL, 0),
                             strtol(argv[3], NULL, 0),
                             argv[4]);
    return 0;
}
(3) Android.mk
LOCAL_PATH:= $(call my-dir)

# Native test executable: records raw PCM from the mic (AudioRecordTest.cpp).
# Links against the Android media/utils shared libraries.
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
    AudioRecordTest.cpp
LOCAL_SHARED_LIBRARIES := \
    libcutils \
    libutils \
    libmedia
LOCAL_MODULE:= audio_record_test
LOCAL_MODULE_TAGS := tests
include $(BUILD_EXECUTABLE)

# Converter executable: wraps raw PCM into a playable WAV file (pcm2wav.cpp).
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
    pcm2wav.cpp
LOCAL_SHARED_LIBRARIES := \
    libcutils \
    libutils \
    libmedia
LOCAL_MODULE:= pcm2wav
LOCAL_MODULE_TAGS := tests
include $(BUILD_EXECUTABLE)
Then use tinyplay to play the generated wav file.
Reference for the recording process:
Recording PCM audio in the Android native C++ layer using AudioRecord: https://blog.csdn.net/romantic_energy/article/details/50521970
Reference for PCM-to-WAV conversion:
The PCM and WAV formats described in C, with an implementation of PCM-to-WAV conversion: https://blog.csdn.net/u010011236/article/details/53026127
4. Why only one side of the headphones plays sound
./AudioRecordTest 44100 2 my.pcm
./pcm2wav my.pcm 44100 2 my.wav
tinyplay my.wav
Only one ear hears the sound.
./AudioRecordTest 44100 1 my.pcm
./pcm2wav my.pcm 44100 1 my.wav
tinyplay cannot play mono sound; playing my.wav with another player, both ears hear the sound.
Why does stereo recording produce sound in only one ear during playback,
while mono recording produces sound in both ears during playback?
Answer:
a. The hardware driver is two-channel, but only one MIC is actually connected; in the stereo data produced by the driver during recording, one channel is therefore always 0.
b. If AudioRecordTest specifies two channels when recording, the resulting PCM data has one channel that is constantly 0, so only one ear produces sound during playback.
c. If AudioRecordTest specifies mono, the resulting PCM data contains only one channel, which is the hardware's left and right channels mixed together; the mixing
is implemented by the AudioFlinger system. When playing single-channel data, the AudioFlinger system sends the mono data both to the hardware's Left DAC (left channel) and to the
hardware's Right DAC (right channel), so both ears can hear the sound.
Part Two: Recording framework and code flow
1. The playbackThread is a MixerThread; a single such thread serves multiple Apps.
2. Android native recording process
Find the corresponding device based on the sound source passed in by the App;
find the profile (derived from audio_policy.conf);
find the module from the profile — that is, the corresponding sound card — and then load that sound card's HAL file;
call the HAL file's openInput() to open an input channel.
3. A recording App only has to call set() for a RecordThread() to be created; multiple Apps may therefore access the sound card concurrently,
leading to contention for the sound data.
4. Recording framework and code flow
a. The APP creates and configures an AudioRecord, specifying the sound source inputSource (for example AUDIO_SOURCE_MIC) as well as the sample rate, channel count, format and other parameters.
b. AudioPolicyManager determines the recording device from the inputSource and the parameters: device.
c. AudioFlinger creates a RecordThread; from then on, this thread reads sound data from that device.
d. Inside the RecordThread, a RecordTrack is created corresponding to the APP's AudioRecord; the APP's AudioRecord and the RecordThread's internal RecordTrack pass data through shared memory.
e. The RecordThread obtains data from the HAL, then passes it to the APP's AudioRecord via the internal RecordTrack.
Note:
In the native code, each APP AudioRecord leads to the creation of a RecordThread; one device may have multiple RecordThreads, but
at any time only one RecordThread is running — so only one APP is recording, and multiple APPs cannot record simultaneously.
Part Three: Modify the code to support simultaneous recording by multiple APPs
Modify AudioPolicyManager.cpp, patches are as follows:
Subject: [PATCH] v2, support Multi AudioRecord at same time --- AudioPolicyManager.cpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/AudioPolicyManager.cpp b/AudioPolicyManager.cpp index 536987a..6c87508 100644 --- a/AudioPolicyManager.cpp +++ b/AudioPolicyManager.cpp @@ -1356,6 +1356,17 @@ audio_io_handle_t AudioPolicyManager::getInput(audio_source_t inputSource, config.channel_mask = channelMask; config.format = format; + /* check whether an AudioInputDescriptor already uses the same profile */ + for (size_t input_index = 0; input_index < mInputs.size(); input_index++) { + sp<AudioInputDescriptor> desc; + desc = mInputs.valueAt(input_index); + if (desc->mProfile == profile) { + desc->mOpenRefCount++; // increment the reference count + desc->mSessions.add(session); // track this session + return desc->mIoHandle; + } + } + status_t status = mpClientInterface->openInput(profile->mModule->mHandle, &input, &config, -- 1.9.1