不带界面的语音识别就需要自定义识别时播放的动画。在调用之前,我们需要先判断麦克风的权限。
// Checks the current microphone authorization state and either starts
// speech recognition or prompts the user to enable access in Settings.
- (void)getUserMicrophonePermissions {
    AVAuthorizationStatus authStatus =
        [AVCaptureDevice authorizationStatusForMediaType:AVMediaTypeAudio];
    if (authStatus == AVAuthorizationStatusNotDetermined) {
        // First run: the system permission alert has not been shown yet.
        // requestRecordPermission: presents it and calls us back once.
        [[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) {
            // The completion handler may run on an arbitrary queue; hop to
            // the main queue before starting recognition or presenting UI.
            dispatch_async(dispatch_get_main_queue(), ^{
                if (granted) {
                    // Microphone enabled.
                    [self speechSpeakAndWrite];
                } else {
                    // Microphone disabled.
                    [self showSetAlertView];
                }
            });
        }];
    } else if (authStatus == AVAuthorizationStatusRestricted ||
               authStatus == AVAuthorizationStatusDenied) {
        // Denied by the user, or restricted (e.g. parental controls).
        [self showSetAlertView];
    } else if (authStatus == AVAuthorizationStatusAuthorized) {
        // Already authorized.
        [self speechSpeakAndWrite];
    }
}
// Shows an alert telling the user that microphone access is disabled and
// how to enable it via Settings > Privacy > Microphone.
- (void)showSetAlertView {
    UIAlertController *alertVC =
        [UIAlertController alertControllerWithTitle:@"麦克风权限未开启"
                                            message:@"麦克风权限未开启,请进入系统【设置】>【隐私】>【麦克风】中打开开关,开启麦克风功能"
                                     preferredStyle:UIAlertControllerStyleAlert];
    UIAlertAction *cancelAction =
        [UIAlertAction actionWithTitle:@"知道了"
                                 style:UIAlertActionStyleCancel
                               handler:^(UIAlertAction * _Nonnull action) {
        }];
    dispatch_async(dispatch_get_main_queue(), ^{
        // Present whatever fallback UI is needed when permission is missing.
    });
    // Optional "open Settings" action. Note: after the user returns from
    // Settings the app may be relaunched, which some products do not accept,
    // so (like WeChat) we simply do not offer the jump at all.
    // UIAlertAction *setAction = [UIAlertAction actionWithTitle:@"去设置" style:UIAlertActionStyleDefault handler:^(UIAlertAction * _Nonnull action) {
    //     // Jump to this app's page in the Settings app.
    //     [[UIApplication sharedApplication] openURL:[NSURL URLWithString:UIApplicationOpenSettingsURLString]];
    // }];
    [alertVC addAction:cancelAction];
    [self presentViewController:alertVC animated:YES completion:nil];
}
// The iFLYTEK speech recognizer; created lazily in -initRecognizer.
@property (nonatomic,strong) IFlySpeechRecognizer *iFlySpeechRecognizer;
// Lazily configures the recognizer, then starts a listening session.
// The iFLYTEK SDK does its recognition work on background threads, so any
// UI shown for the session must be dispatched to the main queue.
- (void)speechSpeakAndWrite {
    if (_iFlySpeechRecognizer == nil) {
        [self initRecognizer];
    }
    BOOL ret = [_iFlySpeechRecognizer startListening];
    if (ret) {
        // Recognition service started successfully.
        dispatch_async(dispatch_get_main_queue(), ^{
            // Present the "listening" UI here — many SDKs call back on
            // background threads, so UI work must happen on the main queue.
        });
    } else {
        // Failed to start the recognition service.
        [MBProgressHUD showMessage:@"启动识别服务失败"];
    }
}
// Creates the shared recognizer on first use and applies all session
// parameters (domain, timeouts, audio settings, language, result format).
- (void)initRecognizer {
    if (_iFlySpeechRecognizer == nil) {
        _iFlySpeechRecognizer = [IFlySpeechRecognizer sharedInstance];
    }
    if (_iFlySpeechRecognizer != nil) {
        // Clear any previously configured parameters.
        [_iFlySpeechRecognizer setParameter:@"" forKey:[IFlySpeechConstant PARAMS]];
        // "iat" selects the dictation (speech-to-text) domain.
        [_iFlySpeechRecognizer setParameter:@"iat" forKey:[IFlySpeechConstant IFLY_DOMAIN]];
        // Maximum recording duration, in milliseconds.
        [_iFlySpeechRecognizer setParameter:@"10000" forKey:[IFlySpeechConstant SPEECH_TIMEOUT]];
        // Trailing endpoint (VAD end-of-speech): stop after 3 s of silence.
        [_iFlySpeechRecognizer setParameter:@"3000" forKey:[IFlySpeechConstant VAD_EOS]];
        // Leading endpoint (VAD begin-of-speech): give up if no speech within 5 s.
        [_iFlySpeechRecognizer setParameter:@"5000" forKey:[IFlySpeechConstant VAD_BOS]];
        // Network wait timeout, in milliseconds.
        [_iFlySpeechRecognizer setParameter:@"10000" forKey:[IFlySpeechConstant NET_TIMEOUT]];
        // Sample rate; 16 kHz is the recommended value.
        [_iFlySpeechRecognizer setParameter:@"16000" forKey:[IFlySpeechConstant SAMPLE_RATE]];
        // Recognition language.
        [_iFlySpeechRecognizer setParameter:@"zh_cn" forKey:[IFlySpeechConstant LANGUAGE]];
        // Accent/dialect.
        [_iFlySpeechRecognizer setParameter:@"mandarin" forKey:[IFlySpeechConstant ACCENT]];
        // "0" = do not add punctuation to the returned text.
        [_iFlySpeechRecognizer setParameter:@"0" forKey:[IFlySpeechConstant ASR_PTT]];
        // Result format: plain text. (The original set RESULT_TYPE twice with
        // the same value, once labelled "json"; one assignment is enough.)
        [_iFlySpeechRecognizer setParameter:@"plain" forKey:[IFlySpeechConstant RESULT_TYPE]];
        // Audio source: the microphone.
        [_iFlySpeechRecognizer setParameter:IFLY_AUDIO_SOURCE_MIC forKey:@"audio_source"];
        // Save the recording in the SDK working directory; if no working path
        // is set, it defaults to Library/Caches.
        [_iFlySpeechRecognizer setParameter:@"asr.pcm" forKey:[IFlySpeechConstant ASR_AUDIO_PATH]];
        [_iFlySpeechRecognizer setDelegate:self];
    }
}
#pragma mark - IFlySpeechRecognizerDelegate
注意:带界面版本 SDK 的代理方法是 - (void)onResult:(NSArray *)resultArray isLast:(BOOL)isLast;
而无界面版本用的是 onResults(多一个 s),方法名不一样,不要混淆。
// IFlySpeechRecognizerDelegate (no-UI SDK variant — note the plural
// selector; the UI variant uses -onResult:isLast: instead).
// Each element of `results` is a dictionary whose KEYS carry the
// recognized text fragments; they are concatenated into one string.
- (void)onResults:(NSArray *)results isLast:(BOOL)isLast
{
    NSMutableString *result = [[NSMutableString alloc] init];
    // firstObject is nil-safe, unlike objectAtIndex:0 on an empty array;
    // fast-enumerating a nil dictionary is simply a no-op.
    NSDictionary *dic = [results firstObject];
    for (NSString *key in dic) {
        [result appendFormat:@"%@", key];
    }
    // Log AFTER assembling the text (the original logged the empty string).
    NSLog(@"result279 = %@", result);
    if (!isLast) {
        // Intermediate result: keep an immutable snapshot.
        _content = [result copy];
    } else {
        // Final callback for this session.
        NSLog(@"result248 = %@", _content);
    }
}
接下来我说有关语音合成的...