Implementing a Microphone in HTML5

To use the microphone in HTML5, you can combine the Web Audio API with the getUserMedia() method.
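
Note that navigator.mediaDevices.getUserMedia() is only available in secure contexts (HTTPS or localhost), so it is worth checking for support before calling it; a minimal sketch:

if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
  alert('getUserMedia is not supported in this browser');
}

With that check in place, here is a simple, complete example: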

<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8">
  <title>Using the Microphone</title>
</head>
<body>
  <button id="start-btn">开始</button>
  <button id="stop-btn" disabled>停止</button>
  <script>
    let audioContext, stream, recorder;

    const startBtn = document.getElementById('start-btn');
    const stopBtn = document.getElementById('stop-btn');

    startBtn.addEventListener('click', async () => {
      try {
        // Request the microphone stream (prompts the user for permission)
        stream = await navigator.mediaDevices.getUserMedia({audio: true});

        // Create the audio context
        audioContext = new AudioContext({sampleRate: 44100});

        // Create a MediaStreamAudioSourceNode from the microphone stream
        const sourceNode = audioContext.createMediaStreamSource(stream);

        // Create the recorder
        recorder = new Recorder(sourceNode);

        // Start recording
        recorder.record();

        startBtn.disabled = true;
        stopBtn.disabled = false;
      } catch (error) {
        console.error('Could not access the microphone:', error);
      }
    });

    stopBtn.addEventListener('click', () => {
      // Stop recording
      recorder.stop();

      // Stop the microphone tracks to release the device
      const tracks = stream.getTracks();
      tracks.forEach(track => track.stop());

      // Export the recording as a WAV blob
      recorder.exportWAV(blob => {
        // Create an audio element with playback controls
        const audioElement = document.createElement('audio');
        audioElement.controls = true;

        // Create an object URL for the blob and use it as the source
        const url = URL.createObjectURL(blob);
        audioElement.src = url;

        // Add the element to the page
        document.body.appendChild(audioElement);
      });

      startBtn.disabled = false;
      stopBtn.disabled = true;
    });

    class Recorder {
      constructor(source) {
        this.recording = false;
        this.buffers = [];  // recorded Float32Array chunks, in time order
        this.context = source.context;
        this.bufferSize = 4096;
        this.input = this.context.createGain();
        source.connect(this.input);
        // ScriptProcessorNode is deprecated but still widely supported;
        // AudioWorklet is the modern replacement
        this.processor = this.context.createScriptProcessor(this.bufferSize, 1, 1);
        this.processor.onaudioprocess = this.process.bind(this);
        this.input.connect(this.processor);
        // The processor must be connected somewhere to keep firing; a
        // MediaStreamDestination avoids routing the microphone to the speakers
        this.destination = this.context.createMediaStreamDestination();
        this.processor.connect(this.destination);
      }

      record() {
        this.buffers = [];  // discard any previous take
        this.recording = true;
      }

      stop() {
        this.recording = false;
      }

      exportWAV(callback) {
        // Merge everything captured so far and hand back a WAV blob.
        // (The original version only queued the callback for the next
        // onaudioprocess event, which never fires after stop().)
        callback(this.exportBuffer(this.buffers));
      }

      process(event) {
        if (!this.recording) return;

        // getChannelData() returns a view into a buffer that the browser
        // reuses between callbacks, so copy the samples before storing them
        const channelData = event.inputBuffer.getChannelData(0);
        this.buffers.push(new Float32Array(channelData));
      }

      exportBuffer(buffers) {
        const samples = this.mergeBuffers(buffers);
        const buffer = new ArrayBuffer(44 + samples.length * 2);
        const view = new DataView(buffer);

        // RIFF identifier
        this.writeString(view, 0, 'RIFF');
        // RIFF chunk size: total file size minus the 8 bytes already written
        view.setUint32(4, 36 + samples.length * 2, true);
        // WAVE identifier
        this.writeString(view, 8, 'WAVE');
        // fmt chunk identifier
        this.writeString(view, 12, 'fmt ');
        // fmt chunk size
        view.setUint32(16, 16, true);
        // audio format (1 = PCM)
        view.setUint16(20, 1, true);
        // channel count (mono)
        view.setUint16(22, 1, true);
        // sample rate
        view.setUint32(24, this.context.sampleRate, true);
        // byte rate (sampleRate * blockAlign)
        view.setUint32(28, this.context.sampleRate * 2, true);
        // block align (bytes per sample frame)
        view.setUint16(32, 2, true);
        // bits per sample
        view.setUint16(34, 16, true);
        // data chunk identifier
        this.writeString(view, 36, 'data');
        // data chunk size in bytes
        view.setUint32(40, samples.length * 2, true);

        // Convert the float samples to 16-bit signed PCM
        let index = 44;
        for (let i = 0; i < samples.length; i++) {
          const s = Math.max(-1, Math.min(1, samples[i]));  // clamp to [-1, 1]
          view.setInt16(index, s * 0x7FFF, true);
          index += 2;
        }

        return new Blob([view], {type: 'audio/wav'});
      }

      mergeBuffers(buffers) {
        // The chunks are sequential in time (single channel), so simply
        // concatenate them into one Float32Array
        const length = buffers.reduce((acc, buffer) => acc + buffer.length, 0);
        const result = new Float32Array(length);

        let offset = 0;
        for (const buffer of buffers) {
          result.set(buffer, offset);
          offset += buffer.length;
        }

        return result;
      }

      writeString(view, offset, string) {
        for (let i = 0; i < string.length; i++) {
          view.setUint8(offset + i, string.charCodeAt(i));
        }
      }
    }
  </script>
</body>
</html>
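
A quick sanity check on the WAV header fields written in exportBuffer(): for 16-bit mono PCM, the byte rate and block align follow directly from the format parameters, which is where the hard-coded values above come from.

const numChannels = 1, bitsPerSample = 16, sampleRate = 44100;
const blockAlign = numChannels * bitsPerSample / 8;  // 2 bytes per sample frame
const byteRate = sampleRate * blockAlign;            // 44100 * 2 = 88200 bytes/s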

In this example, the Recorder class handles the recording. It uses the Web Audio API together with JavaScript typed arrays to process the audio data: getUserMedia() obtains the microphone stream, the AudioContext hosts the processing graph, and a MediaStreamAudioSourceNode feeds the stream into that graph, where a ScriptProcessorNode copies the raw samples into memory. When recording stops, the samples are packed into a 16-bit PCM WAV file, and an audio element is created and added to the page so the recording can be played back. Keep in mind that ScriptProcessorNode is deprecated; AudioWorklet is its modern replacement.
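
If you do not need raw PCM or a WAV container, modern browsers also provide the higher-level MediaRecorder API, which handles encoding for you (typically to a compressed format such as audio/webm; the exact format varies by browser). A minimal sketch of the same start/stop flow:

let mediaRecorder;
const chunks = [];

async function startRecording() {
  const stream = await navigator.mediaDevices.getUserMedia({audio: true});
  mediaRecorder = new MediaRecorder(stream);
  chunks.length = 0;
  mediaRecorder.ondataavailable = event => chunks.push(event.data);
  mediaRecorder.onstop = () => {
    const blob = new Blob(chunks, {type: mediaRecorder.mimeType});
    const audio = document.createElement('audio');
    audio.controls = true;
    audio.src = URL.createObjectURL(blob);
    document.body.appendChild(audio);
    stream.getTracks().forEach(track => track.stop());  // release the device
  };
  mediaRecorder.start();
}

function stopRecording() {
  mediaRecorder.stop();
}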

Reposted from blog.csdn.net/song19990524/article/details/134746512