Calling the iFlytek Speech Recognition API Directly from the WeChat Mini Program Front End

Let's get into it.


Previously my mini program called iFlytek by recording a whole clip, uploading it to the backend, and having the backend open the WebSocket to iFlytek. It was really slow: two or three seconds of audio was tolerable, but anything longer was hopeless.
I recently noticed that the mini program recorder now supports PCM output, so the front end can open the WebSocket to iFlytek directly and stream the audio itself.
The code is below.
I won't cover registering an iFlytek account here.
First, on the mini program side, we need to record audio, so set up the recorder and a few variables:

const app = getApp()
const recorderManager = wx.getRecorderManager();
var wxst;          // speech recognition WebSocket task
var status = 0;    // audio frame status (0: first frame, 1: intermediate frame, 2: last frame)
var iatResult = [] // recognition results, indexed by sn
const searchoptions = {
  duration: 60000,      // maximum recording duration, in ms
  sampleRate: 8000,     // sample rate
  numberOfChannels: 1,  // number of recording channels
  encodeBitRate: 48000, // encoding bit rate
  format: 'PCM',        // audio format
  frameSize: 5,         // frame size, in KB
}
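
With these options each frame carries roughly a third of a second of audio, so onFrameRecorded fires about three times per second. A quick back-of-the-envelope check, assuming 16-bit PCM samples (the constants mirror searchoptions above):

// 16-bit mono PCM at 8 kHz is 16 000 bytes of audio per second
const bytesPerSecond = 8000 * 2 * 1;      // sampleRate * bytes per sample * channels
const frameBytes = 5 * 1024;              // frameSize is 5 KB
console.log((frameBytes / bytesPerSecond).toFixed(2)); // ≈ 0.32 s of audio per frame callback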

Next is the tap handler that starts recording. The signed iFlytek URL comes from a backend endpoint; I had already written one for the old implementation, so I reused it, and it also lets the backend log how often the feature is used.
The Java code on the backend:

import okhttp3.HttpUrl;

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.net.URL;
import java.nio.charset.Charset;
import java.text.SimpleDateFormat;
import java.util.Base64;
import java.util.Date;
import java.util.Locale;
import java.util.TimeZone;

public static String getAuthUrl(String hostUrl, String apiKey, String apiSecret) throws Exception {
    URL url = new URL(hostUrl);
    // RFC 1123 date, required by the iFlytek signature
    SimpleDateFormat format = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss z", Locale.US);
    format.setTimeZone(TimeZone.getTimeZone("GMT"));
    String date = format.format(new Date());
    // string to sign: host, date and request line
    StringBuilder builder = new StringBuilder("host: ").append(url.getHost()).append("\n")
            .append("date: ").append(date).append("\n")
            .append("GET ").append(url.getPath()).append(" HTTP/1.1");
    Charset charset = Charset.forName("UTF-8");
    Mac mac = Mac.getInstance("hmacsha256");
    SecretKeySpec spec = new SecretKeySpec(apiSecret.getBytes(charset), "hmacsha256");
    mac.init(spec);
    byte[] hexDigits = mac.doFinal(builder.toString().getBytes(charset));
    String sha = Base64.getEncoder().encodeToString(hexDigits);
    String authorization = String.format("api_key=\"%s\", algorithm=\"%s\", headers=\"%s\", signature=\"%s\"",
            apiKey, "hmac-sha256", "host date request-line", sha);
    // HttpUrl comes from OkHttp (okhttp3)
    HttpUrl httpUrl = HttpUrl.parse("https://" + url.getHost() + url.getPath()).newBuilder()
            .addQueryParameter("authorization", Base64.getEncoder().encodeToString(authorization.getBytes(charset)))
            .addQueryParameter("date", date)
            .addQueryParameter("host", url.getHost())
            .build();
    // note: switch the scheme to wss:// before handing this URL to wx.connectSocket
    return httpUrl.toString();
}
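
For reference, the tap handler below expects the backend to wrap that URL in a small JSON envelope. The exact shape is up to your backend; this is just a hypothetical response matching what the success callback below checks for (note the wss:// scheme that wx.connectSocket requires, and iat-api.xfyun.cn/v2/iat being iFlytek's IAT endpoint):

// hypothetical shape of the backend response consumed by the wx.request success callback below
const exampleResponse = {
  code: "200",
  data: "wss://iat-api.xfyun.cn/v2/iat?authorization=...&date=...&host=iat-api.xfyun.cn"
}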

The tap handler that starts recording:

start_say: function (e) {
    // "start recording" button
    var that = this;
    wx.getSetting({
      // check whether the user has granted the record permission
      success(res) {
        if (res.authSetting['scope.record']) {
          wx.authorize({
            scope: 'scope.record',
            success() {
              var xfurl = "";
              wx.request({
                // call the backend endpoint to get the signed iFlytek URL
                url: 接口地址, // placeholder for your endpoint
                method: "get",
                header: {
                  'content-type': 'application/json' // default value
                },
                success: function (res) {
                  if (res.data.code == "200" && res.data.data) {
                    xfurl = res.data.data;
                    wxst = wx.connectSocket({
                      // open the WebSocket connection
                      url: xfurl,
                      method: 'GET',
                      success: function (res) {
                        that.setData({
                          // show the mask layer I use while recording
                          shows: true,
                        })
                        recorderManager.start(searchoptions); // start recording
                      }
                    });
                  } else {
                    wx.showToast({
                      title: '获取语音鉴权失败', // failed to get the speech auth URL
                      icon: 'none',
                      mask: true,
                      duration: 3000
                    })
                  }
                },
                fail: function () {
                  wx.showToast({
                    title: '获取语音鉴权失败',
                    icon: 'none',
                    mask: true,
                    duration: 3000
                  })
                }
              })
            },
            fail() {
              wx.showModal({
                title: '微信授权', // WeChat authorization
                content: '您当前未开启语音权限,请在右上角设置(···)中开启“录音功能”', // ask the user to enable the record permission in settings
                showCancel: false,
                success(res) {
                  if (res.confirm) {
                    console.log('用户点击确定') // the user tapped OK
                  }
                }
              })
            }
          })
        } else {
          wx.showModal({
            title: '微信授权',
            content: '您当前未开启语音权限,请在右上角设置(···)中开启“录音功能”',
            showCancel: false,
            success(res) {
              if (res.confirm) {
                console.log('用户点击确定')
              }
            }
          })
        }
      }
    })
  }
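
One note on the permission branch above: authSetting['scope.record'] is undefined for users who have never been asked, so they fall into the else branch and see the settings modal instead of the native authorization prompt. If you prefer to request the permission first, here is a minimal sketch of the alternative order (same callbacks as above, abbreviated):

wx.getSetting({
  success(res) {
    if (res.authSetting['scope.record'] === false) {
      // the user refused before; only the settings page can re-enable the permission
      wx.openSetting()
    } else {
      // undefined (never asked) or true; wx.authorize resolves immediately if already granted
      wx.authorize({
        scope: 'scope.record',
        success() { /* fetch the signed URL and start recording, as above */ }
      })
    }
  }
})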

The tap handler that stops recording:

end_say: function () {
    // "stop recording" button
    var that = this;
    recorderManager.stop();
    that.setData({
      // hide the mask layer
      shows: false,
    })
  }

Next come the listeners for the recorder and the WebSocket. In my flow the WebSocket is opened first and recording starts afterwards.
The recorder listeners:

onShow: function() {
    var that = this;
    recorderManager.onStart(() => {
      // fired when recording starts
      status = 0;
      iatResult = []
      console.log('recorder start')
    });
    recorderManager.onError((res) => {
      // error callback
      console.log(res);
    });
    recorderManager.onStop((res) => {
      // fired when recording stops: send the last frame (status 2) so iFlytek knows the audio is finished
      console.log('recorder stop', res)
      status = 2;
      var sendsty = '{"data":{"status":2,"audio":"","format":"audio/L16;rate=8000","encoding":"raw"}}'
      wxst.send({
        data: sendsty
      })
    });
    recorderManager.onFrameRecorded((res) => {
      // fired for every recorded frame
      const { frameBuffer } = res
      const base64 = wx.arrayBufferToBase64(frameBuffer) // PCM frame -> base64
      switch (status) {
        case 0:
          // first frame: also carries the common and business parameters ("讯飞的appid" is a placeholder for your iFlytek appid)
          status = 1;
          var sendsty = '{"common":{"app_id":"讯飞的appid"},"business":{"language":"zh_cn","domain":"iat","accent":"mandarin","dwa":"wpgs","vad_eos":1000},"data":{"status":0,"format":"audio/L16;rate=8000","encoding":"raw","audio":"' + base64 + '"}}'
          wxst.send({
            data: sendsty
          })
          break;
        case 1:
          // intermediate frames: status 1, audio only
          var sendsty = '{"data":{"status":1,"format":"audio/L16;rate=8000","encoding":"raw","audio":"' + base64 + '"}}'
          wxst.send({
            data: sendsty
          })
          break;
        default:
          console.log("default");
      }
    })
  }
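
The frame payloads above are built by string concatenation. The same three frame types can also be assembled with JSON.stringify, which keeps the escaping out of your hands; a minimal sketch using the same business parameters (XF_APPID is a placeholder for your iFlytek appid):

const XF_APPID = '讯飞的appid' // placeholder: your iFlytek appid

// build the first (status 0), intermediate (status 1) or last (status 2) frame
function buildFrame(status, base64Audio) {
  const frame = {
    data: {
      status: status,
      format: 'audio/L16;rate=8000',
      encoding: 'raw',
      audio: base64Audio || ''
    }
  }
  if (status === 0) {
    // only the first frame carries the common and business sections
    frame.common = { app_id: XF_APPID }
    frame.business = { language: 'zh_cn', domain: 'iat', accent: 'mandarin', dwa: 'wpgs', vad_eos: 1000 }
  }
  return JSON.stringify(frame)
}

// usage: wxst.send({ data: buildFrame(1, base64) })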

The WebSocket listeners:

onLoad: function(options) {
    var that = this;
    wx.onSocketOpen((res) => {
      // WebSocket opened
      console.log('WebSocket 连接已打开', res);
    })
    wx.onSocketError((err) => {
      // connection failed
      console.log('websocket连接失败', err);
      wx.showToast({
        title: 'websocket连接失败', // WebSocket connection failed
        icon: 'none',
        duration: 2000,
        mask: false
      })
    })
    wx.onSocketMessage((res) => {
      // handle a recognition result message
      var data = JSON.parse(res.data)
      if (data.code != 0) {
        console.log("error code " + data.code + ", reason " + data.message)
        return
      }
      let str = ""
      if (data.data.status == 2) {
        // final result: status == 2 means everything has been returned, so close the connection and release resources
        wxst.close();
      } else {
        // intermediate result
      }
      iatResult[data.data.result.sn] = data.data.result
      if (data.data.result.pgs == 'rpl') {
        // dynamic correction: rg is a [start, end] range of sn values that this result replaces
        var rg = data.data.result.rg
        for (var sn = rg[0]; sn <= rg[1]; sn++) {
          iatResult[sn] = null
        }
      }
      // concatenate the words of all remaining results into the current text
      iatResult.forEach(i => {
        if (i != null) {
          i.ws.forEach(j => {
            j.cw.forEach(k => {
              str += k.w
            })
          })
        }
      })
      that.setData({
        searchKey: str // intermediate recognition result
      })
    })
    wx.onSocketClose((res) => {
      // WebSocket connection closed
      var that = this;
      recorderManager.stop();
      that.setData({
        // hide the mask layer opened earlier
        shows: false,
      })
      var str = that.data.searchKey;
      console.log(str);
      str = str.replace(/\s*/g, ""); // strip whitespace
      if (str.substr(str.length - 1, 1) == "。") {
        // strip a trailing full stop
        str = str.substr(0, str.length - 1);
      }
      that.setData({
        searchKey: str // final recognition result
      })
      console.log('WebSocket连接已关闭')
    })
  }
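
For readability, the nested ws/cw/w walk in onSocketMessage can also be pulled out into a small helper; a minimal sketch that works on the same iatResult array as above:

// join the words of every non-null result into one string
function joinResults(results) {
  let text = ''
  results.forEach(result => {
    if (result != null) {
      result.ws.forEach(word => {       // ws: the recognized words of this result
        word.cw.forEach(candidate => {  // cw: candidate words
          text += candidate.w           // w: the text of one candidate
        })
      })
    }
  })
  return text
}

// usage inside onSocketMessage: that.setData({ searchKey: joinResults(iatResult) })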


OK, that's roughly it. In my tests the full result usually arrives about a second after you stop speaking, which makes for a pretty decent user experience.
This is my first article, so if anything could be better, feel free to leave a comment and discuss.
Reposted from blog.csdn.net/qq_33525941/article/details/106257929