本项目使用 wepy 框架编写,下面以 wepy 为例说明接入步骤:
export default class extends wepy.app {
..............
//
在app.json文件中引入微信同声传译插件
plugins: {
WechatSI: {
version: '0.2.2',
provider: 'wx069ba97219f66d99'
}
}
}
<!-- Microphone button: press-and-hold triggers streamRecord (start recording),
     releasing the finger triggers end (stop recording). catch* handlers stop
     event bubbling so parent views do not receive the touch events. -->
<view
id="maikefeng"
catchtouchstart="streamRecord"
catchtouchend="end"
class="iconfont icon-maikefeng"
>
</view>
import wepy from 'wepy';
const plugin = requirePlugin('WechatSI'); // load the WeChat simultaneous-interpretation (speech recognition) plugin
// Obtain the globally unique speech-recognition manager (recordRecoManager)
const manager = plugin.getRecordRecognitionManager();
/**
 * Speech-input component: press-and-hold to record, release to stop.
 * Recognized text is forwarded to the parent component via the
 * `getText` event. Requires the WechatSI plugin's record-recognition
 * manager (`manager`, module-level) and the `scope.record` permission.
 */
export default class speechtranslation extends wepy.component {
  data = {
    recording: false, // true while the user is holding the button and audio is being captured
    recordStatus: 0, // 0 - recording, 1 - translating, 2 - done / re-translate (unused for now)
    pic: '/images/microphone.gif'
  };
  methods = {
    /**
     * Touch-start handler: begin streaming speech recognition.
     */
    streamRecord(e) {
      manager.start();
      this.recording = true;
      this.setData({
        recordStatus: 0,
      });
    },
    /**
     * Touch-end handler: stop recording.
     * Guards against duplicate stop() calls (e.g. repeated touchend events).
     */
    end() {
      // Only stop when a recording is actually in progress.
      if (!this.recording || this.recordStatus !== 0) {
        console.warn('has finished!');
        return;
      }
      this.recording = false;
      // Stop capturing; manager.onStop fires with the recognition result.
      manager.stop();
    }
  };
  onLoad() {
    this.getRecordAuth();
    // Fires when recognition finishes after manager.stop().
    manager.onStop = res => {
      let text = res.result;
      // Guard against empty, undefined, or null results — the original
      // `text == ''` check missed undefined/null from the recognizer.
      if (!text) {
        this.showRecordEmptyTip();
        return;
      }
      console.log(text);
      // Forward the recognized text to the parent component.
      this.$emit('getText', text);
    };
  }
  /**
   * Feedback shown when recognition produced no text.
   */
  showRecordEmptyTip() {
    this.setData({
      recording: false,
      bottomButtonDisabled: false
    });
    wx.showToast({
      title: '请说话',
      duration: 2000,
      icon: 'success',
      image: '/images/no_voice.png', // image overrides icon when both are set
      success: function(res) {},
      fail: function(res) {
        console.log(res);
      }
    });
  }
  /**
   * Check the record permission and request it if not yet granted.
   */
  getRecordAuth() {
    wx.getSetting({
      success(res) {
        console.log('succ');
        console.log(res);
        if (!res.authSetting['scope.record']) {
          wx.authorize({
            scope: 'scope.record',
            success() {
              // User granted recording; later wx.startRecord calls won't prompt again.
              console.log('succ auth');
            },
            fail() {
              console.log('fail auth');
            }
          });
        } else {
          console.log('record has been authed');
        }
      },
      fail(res) {
        console.log('fail');
        console.log(res);
      }
    });
  }
}