讯飞AndroidSDK文档:https://doc.xfyun.cn/msc_android/%E9%A2%84%E5%A4%87%E5%B7%A5%E4%BD%9C.html
讯飞语音SDK不支持android平台语音转写的功能(只支持java平台,当然android可以通过访问后台去请求),这里是通过语音听写来识别音频文件内容,支持音频长度≤60s(没有测试过音频文件这么长的)。
集成讯飞语音SDK到项目后,可以创建一个识别类,将下面代码复制到项目即可
创建 SpeechRecognizer 实例,并完成一些初始化操作(例如上传热词):
/**
 * Creates the {@link SpeechRecognizer} instance and performs one-time setup:
 * uploads user names as "hot words" (a user lexicon) to improve recognition
 * accuracy for names that appear in the app.
 *
 * @param context context used to create the recognizer
 */
@Override public void start(Context context) {
    // Bug fix: use the supplied context instead of the mContext field,
    // which the original ignored the parameter in favor of.
    mRecognizer = SpeechRecognizer.createRecognizer(context, mInitListener);
    mSpeechResult = new StringBuilder();
    if (mRecognizer == null) {
        // Recognizer creation failed (e.g. SDK not initialized) - bail out.
        return;
    }
    // Upload hot words (user names, keywords) to improve recognition accuracy.
    mUserList = UserManager.getUserList();
    UserWords userWords = new UserWords();
    if (mUserList != null) {  // guard against a null user list (original would NPE)
        for (User user : mUserList) {
            userWords.putWord(user.getName());
        }
    }
    mRecognizer.setParameter(SpeechConstant.ENGINE_TYPE, SpeechConstant.TYPE_CLOUD);
    mRecognizer.setParameter(SpeechConstant.TEXT_ENCODING, "utf-8");
    ret = mRecognizer.updateLexicon("userword", userWords.toString(), new LexiconListener() {
        @Override public void onLexiconUpdated(String s, SpeechError error) {
            if (error != null) {
                LogUtil.e(error.toString());
            } else {
                // NOTE(review): success is logged at error level; consider a
                // debug-level method if LogUtil provides one.
                LogUtil.e("success");
            }
        }
    });
    if (ret != ErrorCode.SUCCESS) {
        mView.showTip("上传热词失败,错误码:" + ret);
    }
}
设置识别参数:
//设置参数
/**
 * Configures the recognizer session parameters before dictation starts.
 *
 * @param vadEos  rear endpoint (ms): silence duration after speech that ends input
 * @param asrPtt  "1" to include punctuation in results, "0" to omit it
 * @param asrPath file path where the recognized audio is saved
 */
public void setParam(String vadEos, String asrPtt, String asrPath) {
    if (mRecognizer == null) {
        return;
    }
    mView.stopPlayer();
    // Clear any previously set parameters.
    mRecognizer.setParameter(SpeechConstant.PARAMS, null);
    // Use the cloud recognition engine.
    mRecognizer.setParameter(SpeechConstant.ENGINE_TYPE, SpeechConstant.TYPE_CLOUD);
    // Return results as JSON.
    mRecognizer.setParameter(SpeechConstant.RESULT_TYPE, "json");
    // Language: Simplified Chinese.
    mRecognizer.setParameter(SpeechConstant.LANGUAGE, "zh_cn");
    // Accent for zh_cn. Bug fix: the accent value is "mandarin" per the
    // iFlytek SDK docs; "zh_cn" is the LANGUAGE value, not an accent.
    mRecognizer.setParameter(SpeechConstant.ACCENT, "mandarin");
    // Front endpoint: how long the user may stay silent before timing out (ms).
    mRecognizer.setParameter(SpeechConstant.VAD_BOS, "4000");
    // Rear endpoint: silence after speech that auto-stops recording (ms).
    mRecognizer.setParameter(SpeechConstant.VAD_EOS, vadEos);
    // Punctuation: "0" = no punctuation in results, "1" = with punctuation.
    mRecognizer.setParameter(SpeechConstant.ASR_PTT, asrPtt);
    mRecognizer.setParameter(SpeechConstant.AUDIO_FORMAT, "wav");
    // Where to save the recognized audio file.
    mRecognizer.setParameter(SpeechConstant.ASR_AUDIO_PATH, asrPath);
    // Audio source -1 = feed audio via writeAudio() (stream mode) instead of
    // the device microphone.
    mRecognizer.setParameter(SpeechConstant.AUDIO_SOURCE, "-1");
}
//设置参数完成后,开始识别
/**
 * Starts a dictation session and feeds it the audio read from {@code asrFile}
 * (the recognizer is in stream mode, AUDIO_SOURCE=-1).
 *
 * @param asrFile audio file to recognize; if null or unreadable the session
 *                is cancelled
 */
public void startRecognizer(File asrFile) {
    // Guard added for consistency with start()/setParam(): the recognizer may
    // never have been created.
    if (mRecognizer == null) {
        return;
    }
    ret = mRecognizer.startListening(mRecognizerListener);
    if (ret != ErrorCode.SUCCESS) {
        mView.showTip("听写失败,错误码:" + ret);
        return;
    }
    if (asrFile == null) {
        // Bug fix: the original returned here, leaving a started stream-mode
        // session open with no audio; cancel it instead.
        mRecognizer.cancel();
        return;
    }
    final byte[] audioData = FileUtils.readFile(asrFile);
    if (audioData != null) {
        // Push the whole file, then signal end-of-audio so recognition finishes.
        mRecognizer.writeAudio(audioData, 0, audioData.length);
        mRecognizer.stopListening();
    } else {
        mRecognizer.cancel();
        LogUtil.e("read audiorecord file failed!");
    }
}
监听识别结果:
/** Dictation callbacks: accumulates partial results into mSpeechResult. */
private RecognizerListener mRecognizerListener = new RecognizerListener() {
    @Override public void onBeginOfSpeech() {
        mView.showTip("开始说话");
        // Reset the accumulated text for a fresh session.
        mSpeechResult.setLength(0);
    }

    @Override public void onEndOfSpeech() {
        mView.showTip("结束说话");
    }

    @Override public void onResult(RecognizerResult results, boolean isLast) {
        // Partial results arrive incrementally; append each parsed chunk.
        mSpeechResult.append(JsonParser.parseIatResult(results.getResultString()));
        // Full text so far: mSpeechResult.toString()
    }

    @Override public void onError(SpeechError error) {
        mView.showTip(error.getPlainDescription(true));
    }

    @Override public void onVolumeChanged(int volume, byte[] data) {
        mView.showTip("当前正在说话,音量大小:" + volume);
    }

    @Override public void onEvent(int eventType, int arg1, int arg2, Bundle obj) {
        // No session events handled.
    }
};
FileUtils
/**
 * Reads an entire file into a byte array.
 *
 * <p>Bug fix: the original sized the buffer with {@code fis.available()} and
 * issued a single {@code read(bytes)}. Neither is guaranteed to cover the whole
 * file, so the result could be truncated or partially unfilled. This version
 * sizes the buffer from {@code file.length()} and loops until the buffer is
 * full or EOF is reached.
 *
 * @param file the file to read; may be null
 * @return the file contents, or null if the file is missing or reading fails
 */
public static byte[] readFile(File file) {
    if (file == null || !file.isFile()) {
        return null;
    }
    byte[] bytes = new byte[(int) file.length()];
    FileInputStream fis = null;
    try {
        fis = new FileInputStream(file);
        int offset = 0;
        // read() may return fewer bytes than requested; keep reading until the
        // buffer is full or the stream ends.
        while (offset < bytes.length) {
            int count = fis.read(bytes, offset, bytes.length - offset);
            if (count < 0) {
                break; // unexpected EOF (file shrank since length() was read)
            }
            offset += count;
        }
    } catch (Exception e) {
        e.printStackTrace();
        bytes = null; // signal failure, matching the original contract
    } finally {
        if (fis != null) {
            try {
                fis.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
    return bytes;
}