uni-app speech-to-text demo (using the WeChat Mini Program "simultaneous interpretation" plug-in, ready to use out of the box)

Table of contents

1. Application for simultaneous translation plug-in

2. References in uni-app


1. Application for simultaneous translation plug-in

Mini program developer official website: WeChat public platform

 

 

 

 

 

 After opening the Mini Program management backend, click "Settings" and then select "Third-Party Services".

 Search for "simultaneous interpretation" (同声传译) and add the plug-in to your own Mini Program.

 Click "Details" to view the plug-in's official documentation and its AppId (you will need it later).

2. References in uni-app

After creating a new project, open the manifest.json file and switch to the source-code view (the option is at the bottom of the right-hand menu bar).

 Write the plug-in configuration at the same level as the appid key inside mp-weixin (be careful not to misread it — there is a separate AppID field above).

"mp-weixin": {
		/* Mini-program specific settings */
		"appid": "你自己的小程序id",
		/* Register the WeChat simultaneous-interpretation plug-in */
		"plugins": {
			"WechatSI": {
				"version": "0.3.3",
				"provider": "wx069ba97219f66d99"
			}
		},
		"setting": {
			"urlCheck": false
		},
		"usingComponents": true
	},

Note: version 0.3.3 of the plug-in is recommended — higher versions may not work — and the provider value must be exactly the one shown above, so copy this snippet as-is.

<template>
	<view>
		<!-- Speech-recognition area -->
		<!-- Voice waveform image + speech-to-text button; hidden until a recording exists -->
		<view class="content" v-if="shows">
			<!-- Voice waveform image -->
			<view class="tet" style="position: relative;">
				<view style="position: absolute;width: 30px;height: 30px;background-color: #e9c2db;right: 0;border-radius: 50%;top: 10px;" :class="num == 1 ? '' : 'op'">
					<!-- Speech-to-text button; fanyi performs the translation -->
					<image style="width: 30px;height: 30px;" src="https://pic.imgdb.cn/item/64c85b431ddac507cc0e7284.png" @tap="fanyi"></image>
				</view>
			</view>
			<!-- Translated text, shown after fanyi is tapped -->
			<!-- FIX: the interpolation was published as "{ {endOne}}" (split braces), which Vue does not interpolate -->
			<view class="voicepad" style="margin-top: 20px;width: 94%;margin-left: 3%;padding: 10px; font-size: 12px;">
				{{ endOne }}
			</view>
		</view>
		<!-- Sound-wave animation shown while the user holds the record button -->
		<view class="prompt" v-if="animation">
			<section class="dots-container">
				<view class="dot"></view>
				<view class="dot"></view>
				<view class="dot"></view>
				<view class="dot"></view>
				<view class="dot"></view>
			</section>
			<text>录音中...</text>
		</view>
		<!-- Button area -->
		<view class="action" v-show="!shows">
			<!-- Record button: long-press to record, release to upload; label toggles via `text` -->
			<button @longpress="startRecord" @touchend="endRecord" @tap="startRecord">{{ text }}</button>
		</view>
		<view class="actioning" v-show="shows">
			<button @tap="playVoice" style="background-color: #d1f2d7;color: #18bc37;">播放录音</button>
			<button @tap="resetVoice" style="background-color: #fdedd9;color: #f3a73f;">重置录音</button>
		</view>

	</view>
</template>
<script>
	// Audio context for replaying the recorded clip.
	const innerAudioContext = uni.createInnerAudioContext();
	innerAudioContext.autoplay = true;
	// WeChat "simultaneous interpretation" plug-in and its speech recognizer.
	// (const instead of var/let: neither binding is ever reassigned.)
	const plugin = requirePlugin("WechatSI");
	const manager = plugin.getRecordRecognitionManager();

	export default {
		data() {
			return {
				// Label on the record button ("start recording" / "release to upload").
				text: '开始录音',
				// Temp file path of the finished recording (set by manager.onStop).
				voicePath: '',
				// Whether the playback / translation panel is shown.
				shows: false,
				// Whether the "recording..." animation is shown.
				animation: false,
				// Latest speech-recognition result (updated while recording).
				voiceState: "点击录音进行翻译",
				// Text displayed in the translation pad after tapping the translate button.
				endOne: '',
				// 1 = not translated yet; anything else = already translated (blocks repeat taps).
				num: 1

			}
		},
		onShow() {

		},
		onLoad() {
			// Register the recognition callbacks once, when the page loads.
			this.initRecord();
		},
		methods: {
			/**
			 * Begin a recording/recognition session.
			 * Bound to both @longpress and @tap on the record button.
			 */
			startRecord() {
				console.log('开始录音');
				manager.start({
					duration: 30000, // max recording length in ms
					lang: "zh_CN"
				})
				this.text = '松手上传';
				this.animation = true
			},
			/**
			 * Finish the session (bound to @touchend): hide the animation,
			 * reveal the result panel and stop the recognizer.
			 */
			endRecord() {
				console.log('录音结束');
				this.text = '开始录音';
				this.animation = false
				this.shows = true
				manager.stop();
			},
			/**
			 * Replay the recorded clip, if one exists.
			 */
			playVoice() {
				console.log('播放录音');
				if (this.voicePath) {
					innerAudioContext.src = this.voicePath;
					innerAudioContext.play();
				}
			},
			/**
			 * Discard the current recording and reset all state so a new
			 * recording can be made.
			 */
			resetVoice() {
				console.log('重置录音');
				innerAudioContext.stop();
				this.shows = false
				this.voicePath = '';
				this.endOne = ''
				this.voiceState = ''
				this.num = 1
			},
			/**
			 * Show the recognized text ("translate"). Guarded by `num` so the
			 * result is only produced once per recording.
			 */
			fanyi() {
				if (this.num == 1) {
					console.log(this.voicePath);
					this.endOne = this.voiceState
					this.num = this.num + 1
					uni.showToast({
						title: '转换成功',
						icon: 'success',
						duration: 2000, // shown for 2 seconds
					})
				} else {
					uni.showToast({
						title: '文字已翻译,请勿重复点击',
						icon: 'error',
						duration: 2000, // shown for 2 seconds
					})
				}
			},
			/**
			 * Register the speech-recognition callbacks.
			 * FIX: every handler is an arrow function so `this` refers to the
			 * component instance — the original onStart used a plain function,
			 * so its assignment to this.voiceState never reached the component.
			 */
			initRecord: function() {
				manager.onStart = (res) => {
					this.voiceState = "onStart:" + res.msg + "正在录音"
				};
				// Called whenever new partial recognition text is available.
				manager.onRecognize = (res) => {
					this.voiceState = res.result;
				}
				// Called when recognition finishes.
				manager.onStop = (res) => {
					this.voicePath = res.tempFilePath;
					this.voiceState = res.result;
					if (this.voiceState == '') {
						console.log('没有说话')
						this.endOne = '周围太安静啦~再试试~';
					}
				}

				// Called when recognition fails.
				manager.onError = (res) => {
					this.voiceState = '翻译员去吃饭啦,等下再试试';
					this.shows = false
					uni.showToast({
						title: '翻译员去吃饭啦,等下再试试',
						icon: 'error',
						duration: 2000, // shown for 2 seconds
					})
				}
			},
		}
	}
</script>
<style>
	/* Panel that holds the waveform image and the translated text. */
	.content {
		box-sizing: border-box;
		width: 98%;
		margin-left: 1%;
		min-height: 300px;
		padding-top: 20px;
		padding-left: 15px;
		padding-right: 15px;
		padding-bottom: 20px;
		display: flex;
		flex-direction: column;
		align-items: center;
	}

	/* Waveform strip; the background image is the static waveform picture. */
	.tet {
		width: 100%;
		height: 50px;
		margin-top: 25px;
		border-radius: 30px;
		background-repeat: no-repeat;
		background-size: 100% 100%;
		background-image: url('https://pic.imgdb.cn/item/64c85a901ddac507cc0d52a4.png');
		position: relative;
	}

	/* Record-button bar pinned to the bottom of the screen. */
	.action {
		position: fixed;
		bottom: 20px;
		width: 100%;
	}

	.action button {
		background-color: #d1f2d7;
		color: #18bc37;
		font-size: 14px;
		display: flex;
		height: 40px;
		width: 96%;
		margin-left: 2%;
		align-items: center;
		justify-content: center;
	}

	/* Play / reset button bar, shown instead of .action once a recording exists. */
	.actioning {
		position: fixed;
		bottom: 20px;
		width: 100%;
		display: flex;
		align-items: center;
		justify-content: space-between;
	}

	.actioning button {
		height: 40px;
		width: 45%;
		border: 0;
		font-size: 14px;
		display: flex;
		align-items: center;
		justify-content: center;
	}

	/* NOTE(review): .bbig is not referenced in this component's template — possibly leftover. */
	.bbig {
		width: 94%;
	}
	
	/* Hides the translate button once the text has been translated (num != 1). */
	.op{
		visibility: hidden;
	}
	/* Animation */
	/* "Recording..." overlay shown while the user holds the record button. */
	.prompt {
		width: 100%;
		height: 80px;
		position: fixed;
		bottom: 70px;
	}

	/* Bobbing "录音中..." label (uses the single-property `puls` keyframes below). */
	.prompt text {
		position: absolute;
		bottom: 2px;
		color: #f3a73f;
		left: calc(45%);
		animation: puls 1.5s infinite ease-in-out;
	}

	/* Rounded pill that contains the five pulsing dots. */
	.dots-container {
		display: flex;
		align-items: center;
		justify-content: center;
		height: 80px;
		width: 45%;
		position: absolute;
		bottom: 0px;
		left: calc(27.5%);
		background-color: #fdedd9;
		border-radius: 20px;
	}

	.dot {
		height: 16px;
		width: 16px;
		margin-right: 10px;
		border-radius: 10px;
		background-color: #f3a73f;
		animation: pulse 1.5s infinite ease-in-out;
	}

	.dot:last-child {
		margin-right: 0;
	}

	/* Negative/positive delays stagger the dots so the pulse travels across them. */
	.dot:nth-child(1) {
		animation-delay: -0.3s;
	}

	.dot:nth-child(2) {
		animation-delay: -0.1s;
	}

	.dot:nth-child(3) {
		animation-delay: 0.1s;
	}

	/* Dot pulse: scale + color + expanding shadow ring. */
	@keyframes pulse {
		0% {
			transform: scale(0.8);
			background-color: #f3a73f;
			box-shadow: 0 0 0 0 rgba(243, 167, 63, 0.7);
		}

		50% {
			transform: scale(1.2);
			background-color: #f9d39f;
			box-shadow: 0 0 0 10px rgba(178, 212, 252, 0);
		}

		100% {
			transform: scale(0.8);
			background-color: #f3a73f;
			box-shadow: 0 0 0 0 rgba(243, 167, 63, 0.7);
		}
	}

	/* Gentle vertical bob for the "录音中..." label. */
	@keyframes puls {
		0% {
			transform: translateY(0px)
		}

		50% {
			transform: translateY(-4px)
		}

		100% {
			transform: translateY(0px)
		}
	}
</style>

At this point, simultaneous interpretation is ready to use. This demo implements two features — playing back the recording and translating it after the recording finishes — and it can be extended to support true real-time translation.

The demo can be copied and used directly; you only need to replace the images. If an error occurs when running on a real device, it is recommended to switch the real-device debugging mode to version 1.0 for testing.

Guess you like

Origin blog.csdn.net/qq_17189095/article/details/132046938
Recommended