uniapp implements speech recognition

Preface: This article mainly covers calling the recording API and passing the recorded audio file to the backend for speech recognition.

Implementing the recording function in the app and the WeChat mini program

API used - uni.getRecorderManager()

uni.getRecorderManager() | uni-app official website

Note: this API is not supported on the H5 platform.

button

     <!-- Record button: press-and-hold to talk.
          touchstart starts recording (startSay), touchend stops it (endSay). -->
		<view  class="bottom flex-column">
			<!-- Hint text shown above the button ("Press and hold to talk") -->
			<view class="tip flex-column">请按住说话</view>
			<view class="btns flex-row">
				<view @touchstart="startSay" @touchend="endSay" class="button flex-column">
					<!-- Microphone icon from the project's iconfont -->
					<text class="iconfont icon-voice"></text>
				</view>
			</view>
		</view>

Create the recorder in the initialization hook and listen for the recording-stop event

<script>
// #ifdef MP-WEIXIN
const recorderManager = uni.getRecorderManager();
// #endif

export default{
   data() {
        return {
            voicePath: '', //录音文件
        }
   }
   onLoad() {
		this.interValTime = 100
	    let self = this;
        // #ifdef MP-WEIXIN
		recorderManager.onStop(function (res) {
			self.voicePath = res.tempFilePath;
			self.getData();
		});			
        // #endif
   },
   methods: {
             let token = uni.getStorageasync('token')
             uni.uploadFile({
						url: 'resume/voiceToText',
						name: 'file',
						filePath: this.voicePath,
						header: {
							'Authorization': `bearer ${token}`
						},
						success: (res) => {
							
						},
						fail: (err) => {
						
						}
					})
    }
</script>

Start recording and stop recording

           startSay() {
				// Begin recording when the user presses and holds the button.
				// Only compiled for the WeChat mini program (#ifdef MP-WEIXIN).
				// #ifdef MP-WEIXIN
				recorderManager.start();
				// #endif
			},
          endSay() {	
				// Stop recording on touch end. This triggers recorderManager.onStop
				// (registered in onLoad), which stores the file path and uploads it.
				// #ifdef MP-WEIXIN
				recorderManager.stop();
				// #endif
				
			},

H5 achieves recording function

uni-app's native recording API does not support H5, so to record on H5 you need to use a plug-in.

The plug-in is framework-agnostic: it can be used not only in uni-app but in other projects as well.

npm install recorder-core

Usage

   // #ifdef H5
	// recorder-core provides browser recording; the wav engine module registers
	// the "wav" output type used by initRecords below.
	import Recorder from 'recorder-core'
	import 'recorder-core/src/engine/wav.js'
	// #endif
      data() {
       return {
         rec: null // recorder-core instance, created in initRecords (H5 only)
       }
      },
      onLoad() {
			// Create the recorder and request microphone permission up front (H5 only).
			// #ifdef H5
			this.initRecords()
			// #endif
		},
          initRecords() {
				var rec = this.rec = Recorder({
					type: "wav",
					bitRate: 16,
					sampleRate: 32000,
				});
				rec.open(() => {}, () => {
					uni.showToast({
						title: '获取手机录音权限失败',
						icon: 'none'
					})
				});
			},

 

          startSay() {
				// #ifdef H5
				if (!this.rec) {
					return;
				}
				this.rec.start();
				// #endif
			},
			endSay() {
				// #ifdef H5
				if (!this.rec) {
					return;
				}
				this.rec.stop((blob, duration) => {
					const file = new File([blob], 'record.wav', {
						type: blob.type
					})
					this.voicePath = file
					this.getData()
				})
				// #endif
			}

The getData method then uploads the file to the backend for recognition, exactly as in the mini-program case above.

Guess you like

Origin blog.csdn.net/qq_42625428/article/details/124323237