Implementing Audio Recording with Android AudioRecord and Converting PCM to WAV

Overview
The AudioRecord class manages the audio resources that let an Android application record audio from the platform's audio input hardware. This is done by "pulling" (reading) the data synchronously from the AudioRecord object. The application is responsible for polling the AudioRecord object in time, using one of three methods: read(byte[], int, int), read(short[], int, int) or read(java.nio.ByteBuffer, int). The choice of which to use is based on the audio data storage format that is most convenient for the user of AudioRecord.
Upon creation, an AudioRecord object initializes its associated audio buffer, which it fills with new audio data. The size of this buffer, specified during construction, determines how long an AudioRecord can record before "over-running" data that has not yet been read. Data should be read from the audio hardware in chunks smaller than the total recording buffer size.
An application that creates an AudioRecord instance needs the Manifest.permission.RECORD_AUDIO permission; otherwise the Builder will throw an UnsupportedOperationException.
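
On Android 6.0 (API 23) and above, RECORD_AUDIO is also a runtime ("dangerous") permission, so declaring it in the manifest is not enough. Below is a minimal sketch of requesting it from an Activity, assuming the AndroidX core library; the request code 1001 is an arbitrary value chosen for this example.

// Request RECORD_AUDIO at runtime before constructing the AudioRecord.
// Uses androidx.core.content.ContextCompat and androidx.core.app.ActivityCompat.
private static final int REQUEST_RECORD_AUDIO = 1001;

private void ensureRecordPermission() {
    if (ContextCompat.checkSelfPermission(this, Manifest.permission.RECORD_AUDIO)
            != PackageManager.PERMISSION_GRANTED) {
        // Ask the user; the result arrives in onRequestPermissionsResult()
        ActivityCompat.requestPermissions(this,
                new String[]{Manifest.permission.RECORD_AUDIO}, REQUEST_RECORD_AUDIO);
    }
    // If the permission is already granted, recording can start right away.
}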


Recording Steps
1. Create the AudioRecord object
  • audioSource: the audio capture source; it can be the microphone, voice-call audio, or built-in system audio
  • sampleRateInHz: the audio sample rate
  • channelConfig: mono, stereo, ...
  • audioFormat: the audio sample format; it specifies the encoding and the size of each sample, and only 8-bit and 16-bit PCM are supported
  • bufferSizeInBytes: the total size (in bytes) of the buffer that audio data is written to during recording; the minimum size can be obtained with getMinBufferSize()
audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRateInHz, channelConfig,
                audioFormat, recordMinBufferSize);
2. Create the buffer

The minimum buffer size required by the system can be obtained with AudioRecord.getMinBufferSize().

int recordMinBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
//size of the buffer used for AudioRecord reads
buffer = new byte[recordMinBufferSize];
3. Start recording

Create an output stream; read audio data from the AudioRecord into the pre-allocated buffer, and write the buffer's contents to the stream as you go.

if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_STOPPED) {
    recorderState = true;
    audioRecord.startRecording();
    new RecordThread(path, name).start();
} else {
    Log.i(TAG, "start: " + audioRecord.getRecordingState());
}


...

FileOutputStream fos = null;
try {
    fos = new FileOutputStream(cachePath);
} catch (FileNotFoundException e) {
    e.printStackTrace();
}

if (fos == null) {
    Log.i(TAG, "run: cache file not found: " + cachePath);
    return;
}

//the raw PCM data read from AudioRecord lands in buffer
int read;
while (recorderState && !isInterrupted()) {
    read = audioRecord.read(buffer, 0, buffer.length);
    // read returns the number of bytes read, or a negative error code
    if (read > 0) {
        try {
            // only write the bytes that were actually read
            fos.write(buffer, 0, read);
            Log.i(TAG, "run: wrote audio data -> " + read);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}

try {
    fos.flush();
    fos.close();
} catch (IOException e) {
    e.printStackTrace();
}
//process the raw PCM data however you need; here it is converted to WAV and saved locally
PcmToWavUtil.getInstance().pcmToWav(cachePath, path + name);
            
4. Stop recording
 public void stop() {
        recorderState = false;
        if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
            audioRecord.stop();
        }
    }
5. Release resources
public void release() {
        recorderState = false;
        if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
            audioRecord.stop();
        }
        audioRecord.release();
        audioRecord = null;
    }
Complete Example

The complete example is shown below: the recording is first cached as a PCM file,
then converted to WAV and saved to local storage.

public class AudioRecordUtil {

    //sample rate: 44100 Hz is the current standard, but some devices still support 22050, 16000, 11025
    //channel config: CHANNEL_IN_STEREO for stereo, CHANNEL_IN_MONO for mono
    //audio data format: 16-bit PCM is guaranteed to be supported by all devices; 8-bit PCM is not necessarily supported
    /*public static final int sampleRateInHz = 44100;
    public static final int channelConfig = AudioFormat.CHANNEL_IN_STEREO;*/

    public static final int sampleRateInHz = 16000;
    public static final int channelConfig = AudioFormat.CHANNEL_IN_MONO;
    // public static final int channelConfig = AudioFormat.CHANNEL_IN_STEREO;
    public static final int audioFormat = AudioFormat.ENCODING_PCM_16BIT;

    //recording state flag
    private boolean recorderState = true;
    private byte[] buffer;
    private AudioRecord audioRecord;
    private static AudioRecordUtil audioRecordUtil = new AudioRecordUtil();
    private String TAG = "AudioRecordUtil";

    public static AudioRecordUtil getInstance() {
        return audioRecordUtil;
    }

    private AudioRecordUtil() {
        init();
    }

    @SuppressLint("MissingPermission")
    private void init() {
        int recordMinBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
        //size of the buffer used for AudioRecord reads
        buffer = new byte[recordMinBufferSize];
        //construct the AudioRecord instance from the recording parameters
        audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRateInHz, channelConfig,
                audioFormat, recordMinBufferSize);
    }

    /**
     * Start recording
     */
    public void start(String path, String name) {
        // getRecordingState() (not getState()) reports whether a recording is in progress
        if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_STOPPED) {
            recorderState = true;
            audioRecord.startRecording();
            new RecordThread(path, name).start();
        } else {
            Log.i(TAG, "start: " + audioRecord.getRecordingState());
        }
    }

    /**
     * Stop recording
     */
    public void stop() {
        recorderState = false;
        if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
            audioRecord.stop();
        }
    }

    /**
     * Release resources
     */
    public void release() {
        recorderState = false;
        if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
            audioRecord.stop();
        }
        audioRecord.release();
        audioRecord = null;
    }

    private class RecordThread extends Thread {

        private String cachePath;
        private String name;
        private String path;

        public RecordThread(String path, String name) {
            this.path = path;
            this.name = name;
            this.cachePath = path + "cache.pcm";
        }

        @Override
        public void run() {
            Log.i(TAG, "run: pcm cache path=" + cachePath);

            // FileUtils here is a helper from the original project, not part of the Android SDK
            FileUtils.delete(cachePath);

            boolean file = FileUtils.createFile(path, "cache.pcm");
            if (file) {
                Log.i(TAG, "run: created cache file: " + cachePath);
            } else {
                Log.i(TAG, "run: failed to create cache file: " + cachePath);
                return;
            }
            FileOutputStream fos = null;
            try {
                fos = new FileOutputStream(cachePath);
            } catch (FileNotFoundException e) {
                e.printStackTrace();
            }

            if (fos == null) {
                Log.i(TAG, "run: cache file not found: " + cachePath);
                return;
            }

            //the raw PCM data read from AudioRecord lands in buffer
            int read;
            while (recorderState && !isInterrupted()) {
                read = audioRecord.read(buffer, 0, buffer.length);
                // read returns the number of bytes read, or a negative error code
                if (read > 0) {
                    try {
                        // only write the bytes that were actually read
                        fos.write(buffer, 0, read);
                        Log.i(TAG, "run: wrote audio data -> " + read);
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
            }

            try {
                fos.flush();
                fos.close();
            } catch (IOException e) {
                e.printStackTrace();
            }

            PcmToWavUtil.getInstance().pcmToWav(cachePath, path + name);
        }
    }


    /**
     * Read a file into a byte array
     */
    public byte[] convert(String path) throws IOException {
        FileInputStream fis = new FileInputStream(path);
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        byte[] b = new byte[1024];

        for (int readNum; (readNum = fis.read(b)) != -1; ) {
            bos.write(b, 0, readNum);
        }

        byte[] bytes = bos.toByteArray();

        fis.close();
        bos.close();
        return bytes;
    }

}
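
A minimal usage sketch from an Activity follows; the directory and file name are illustrative, and RECORD_AUDIO is assumed to have already been granted.

// Hypothetical values: store the output under the app-specific external files directory.
String dir = getExternalFilesDir(null).getAbsolutePath() + "/";
String wavName = "record.wav";

AudioRecordUtil recorder = AudioRecordUtil.getInstance();
recorder.start(dir, wavName);   // starts recording; PCM is cached as dir + "cache.pcm"

// ... later, e.g. from a button click:
recorder.stop();                // RecordThread finishes and converts the cache to dir + wavName

// when recording is no longer needed at all:
recorder.release();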


PCM to WAV

Raw PCM data has no header; to turn it into a playable WAV file, a 44-byte RIFF/WAVE header describing the format is prepended (a worked example of the derived header fields follows below).
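
For example, with the settings used above (16000 Hz, mono, 16-bit PCM), the derived header fields work out as follows; this small snippet is just a sanity check under those assumptions:

// Derived WAV header fields for 16 kHz, mono, 16-bit PCM
int sampleRate = 16000, channels = 1, bitsPerSample = 16;
int byteRate   = sampleRate * channels * bitsPerSample / 8;  // 32000 bytes per second
int blockAlign = channels * bitsPerSample / 8;               // 2 bytes per sample frame
// RIFF ChunkSize = PCM data length + 36; Subchunk2Size = PCM data length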


public class PcmToWavUtil {

    private int mBufferSize; //size of the copy buffer
    private int mSampleRate = 44100; //must match the sample rate used when recording
    private int mChannel = AudioFormat.CHANNEL_IN_STEREO; //stereo
    private int mEncoding = AudioFormat.ENCODING_PCM_16BIT;

    private static class SingleHolder {
        static PcmToWavUtil mInstance = new PcmToWavUtil();
    }

    public static PcmToWavUtil getInstance() {
        return SingleHolder.mInstance;
    }

    public PcmToWavUtil() {
        mSampleRate = AudioRecordUtil.sampleRateInHz;
        mChannel = AudioRecordUtil.channelConfig;
        mEncoding = AudioRecordUtil.audioFormat;
        Log.i("AudioRecordUtil", "PcmToWavUtil: mChannel=" + mChannel + " mEncoding=" + mEncoding + " mSampleRate=" + mSampleRate);
        this.mBufferSize = AudioRecord.getMinBufferSize(mSampleRate, mChannel, mEncoding);
    }

    /**
     * @param sampleRate sample rate
     * @param channel    channel configuration
     * @param encoding   audio data format
     */
    public PcmToWavUtil(int sampleRate, int channel, int encoding) {
        this.mSampleRate = sampleRate;
        this.mChannel = channel;
        this.mEncoding = encoding;
        this.mBufferSize = AudioRecord.getMinBufferSize(mSampleRate, mChannel, mEncoding);
    }

    /**
     * Convert a PCM file to a WAV file
     *
     * @param inFilename  source (PCM) file path
     * @param outFilename destination (WAV) file path
     * @param deleteOrg   whether to delete the source file afterwards
     */
    public void pcmToWav(String inFilename, String outFilename, boolean deleteOrg) {
        FileInputStream in;
        FileOutputStream out;
        long totalAudioLen;
        long totalDataLen;
        long longSampleRate = mSampleRate;
        int channels = mChannel == AudioFormat.CHANNEL_IN_MONO ? 1 : 2;
        // mEncoding is an AudioFormat constant, not a bit count, so derive the bits per sample from it
        int bitsPerSample = mEncoding == AudioFormat.ENCODING_PCM_8BIT ? 8 : 16;
        long byteRate = (long) bitsPerSample * longSampleRate * channels / 8;
        byte[] data = new byte[mBufferSize];
        try {
            in = new FileInputStream(inFilename);
            out = new FileOutputStream(outFilename);
            totalAudioLen = in.getChannel().size();
            totalDataLen = totalAudioLen + 36;

            writeWaveFileHeader(out, totalAudioLen, totalDataLen,
                    longSampleRate, channels, byteRate);
            int length;
            while ((length = in.read(data)) != -1) {
                // only copy the bytes actually read, so the last chunk is not padded
                out.write(data, 0, length);
            }
            in.close();
            out.flush();
            out.close();
            if (deleteOrg) {
                new File(inFilename).delete();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public void pcmToWav(String inFilename, String outFilename) {
        pcmToWav(inFilename, outFilename, false);
    }

    /**
     * Write the 44-byte WAV file header
     */
    private void writeWaveFileHeader(FileOutputStream out, long totalAudioLen,
                                     long totalDataLen, long longSampleRate, int channels, long byteRate)
            throws IOException {
        // mEncoding is an AudioFormat constant, so derive the bits per sample from it
        int bitsPerSample = mEncoding == AudioFormat.ENCODING_PCM_8BIT ? 8 : 16;
        byte[] header = new byte[44];
        // ChunkID, "RIFF", 4 bytes
        header[0] = 'R';
        header[1] = 'I';
        header[2] = 'F';
        header[3] = 'F';
        // ChunkSize = pcm length + 36, 4 bytes
        header[4] = (byte) (totalDataLen & 0xff);
        header[5] = (byte) ((totalDataLen >> 8) & 0xff);
        header[6] = (byte) ((totalDataLen >> 16) & 0xff);
        header[7] = (byte) ((totalDataLen >> 24) & 0xff);
        // Format, "WAVE", 4 bytes
        header[8] = 'W';
        header[9] = 'A';
        header[10] = 'V';
        header[11] = 'E';
        // Subchunk1ID, "fmt ", 4 bytes
        header[12] = 'f'; // 'fmt ' chunk
        header[13] = 'm';
        header[14] = 't';
        header[15] = ' ';
        // Subchunk1Size, 16 for PCM, 4 bytes
        header[16] = 16;
        header[17] = 0;
        header[18] = 0;
        header[19] = 0;
        // AudioFormat, PCM = 1, 2 bytes
        header[20] = 1;
        header[21] = 0;
        // NumChannels, mono = 1, stereo = 2, 2 bytes
        header[22] = (byte) channels;
        header[23] = 0;
        // SampleRate, 4 bytes
        header[24] = (byte) (longSampleRate & 0xff);
        header[25] = (byte) ((longSampleRate >> 8) & 0xff);
        header[26] = (byte) ((longSampleRate >> 16) & 0xff);
        header[27] = (byte) ((longSampleRate >> 24) & 0xff);
        // ByteRate = SampleRate * NumChannels * BitsPerSample / 8, 4 bytes
        header[28] = (byte) (byteRate & 0xff);
        header[29] = (byte) ((byteRate >> 8) & 0xff);
        header[30] = (byte) ((byteRate >> 16) & 0xff);
        header[31] = (byte) ((byteRate >> 24) & 0xff);
        // BlockAlign = NumChannels * BitsPerSample / 8, 2 bytes
        header[32] = (byte) (channels * bitsPerSample / 8);
        header[33] = 0;
        // BitsPerSample, 2 bytes
        header[34] = (byte) bitsPerSample;
        header[35] = 0;
        // Subchunk2ID, "data", 4 bytes
        header[36] = 'd';
        header[37] = 'a';
        header[38] = 't';
        header[39] = 'a';
        // Subchunk2Size = pcm length, 4 bytes
        header[40] = (byte) (totalAudioLen & 0xff);
        header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
        header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
        header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
        out.write(header, 0, 44);
    }
}
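
If the intermediate PCM cache is not needed once the WAV has been written, the three-argument overload can delete it; cachePath, path and name here refer to the variables used in RecordThread above.

// deleteOrg = true removes cache.pcm after a successful conversion
PcmToWavUtil.getInstance().pcmToWav(cachePath, path + name, true);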

END.


Reprinted from blog.csdn.net/qq_35193677/article/details/126726417