iOS: Imitating WeChat's Voice Input Animation

Copyright notice: this is an original article by the author, released under the CC 4.0 BY-SA license. Please include the original source link and this notice when reposting.
Original link: https://blog.csdn.net/sinat_30162391/article/details/83574720

This post follows on from the previous one and adds an animation that changes with the volume of the voice input.

//
//  PBSpeechRecognizer.h
//  ParkBest
//
//  Created by summerxx27 on 2018/10/30.
//  Copyright © 2018年 summerxx27. All rights reserved.
//
#import <Foundation/Foundation.h>

NS_ASSUME_NONNULL_BEGIN
@protocol PBSpeechRecognizerProtocol <NSObject>
@optional
- (void)recognitionSuccess:(NSString *)result;
- (void)recognitionFail:(NSString *)result;
- (void)level:(float)value;
@end
@interface PBSpeechRecognizer : NSObject
@property (nonatomic, weak) id<PBSpeechRecognizerProtocol> delegate;
/// Start speech recognition and level metering.
- (void)startR;
/// Stop speech recognition and level metering.
- (void)stopR;
@end

NS_ASSUME_NONNULL_END
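
One prerequisite the listings here don't cover: speech recognition and microphone access both require user permission. The app's Info.plist needs NSSpeechRecognitionUsageDescription and NSMicrophoneUsageDescription entries, and authorization should be requested before calling startR. A minimal sketch, assuming <Speech/Speech.h> and <AVFoundation/AVFoundation.h> are imported and this runs somewhere in the host view controller:

// Ask for speech-recognition and microphone permission up front;
// without them the recognition task and the audio engine will not produce results.
if (@available(iOS 10.0, *)) {
    [SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status) {
        if (status != SFSpeechRecognizerAuthorizationStatusAuthorized) {
            NSLog(@"Speech recognition not authorized: %ld", (long)status);
        }
    }];
}
[[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) {
    if (!granted) {
        NSLog(@"Microphone permission denied");
    }
}];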

//
//  PBSpeechRecognizer.m
//  ParkBest
//
//  Created by summerxx27 on 2018/10/30.
//  Copyright © 2018年 summerxx27. All rights reserved.
//

#import "PBSpeechRecognizer.h"
#import <Speech/Speech.h>
API_AVAILABLE(ios(10.0))
@interface PBSpeechRecognizer()
@property (nonatomic, strong) AVAudioEngine *audioEngine;
@property (nonatomic, strong) SFSpeechRecognizer *speechRecognizer;
@property (nonatomic, strong) SFSpeechAudioBufferRecognitionRequest *recognitionRequest;
@property (nonatomic, strong) AVAudioRecorder *recorder;
@property (nonatomic, strong) NSTimer *levelTimer;
@end
@implementation PBSpeechRecognizer

- (void)startR {
    if (!self.speechRecognizer) {
        // Set the recognition locale (Simplified Chinese here)
        NSLocale *locale = [NSLocale localeWithLocaleIdentifier:@"zh-CN"];
        if (@available(iOS 10.0, *)) {
            self.speechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:locale];
        } else {
            // Fallback on earlier versions
        }
    }
    if (!self.audioEngine) {
        self.audioEngine = [[AVAudioEngine alloc] init];
    }
    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    if (@available(iOS 10.0, *)) {
        [audioSession setCategory:AVAudioSessionCategoryRecord mode:AVAudioSessionModeMeasurement options:AVAudioSessionCategoryOptionDuckOthers error:nil];
    } else {
        // Fallback on earlier versions
    }
    [audioSession setActive:YES withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error:nil];
    if (self.recognitionRequest) {
        [self.recognitionRequest endAudio];
        self.recognitionRequest = nil;
    }
    if (@available(iOS 10.0, *)) {
        self.recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
    } else {
        // Fallback on earlier versions
    }
    self.recognitionRequest.shouldReportPartialResults = YES; // report partial results in real time
    if (@available(iOS 10.0, *)) {
        [self.speechRecognizer recognitionTaskWithRequest:self.recognitionRequest resultHandler:^(SFSpeechRecognitionResult * _Nullable result, NSError * _Nullable error) {
            if (error) {
                if ([self.delegate respondsToSelector:@selector(recognitionFail:)]) {
                    [self.delegate recognitionFail:error.localizedDescription];
                }
            } else if (result.isFinal) {
                NSLog(@"is final: %d  result: %@", result.isFinal, result.bestTranscription.formattedString);
                if ([self.delegate respondsToSelector:@selector(recognitionSuccess:)]) {
                    [self.delegate recognitionSuccess:result.bestTranscription.formattedString];
                }
            }
        }];
    } else {
        // Fallback on earlier versions
    }
    AVAudioFormat *recordingFormat = [[self.audioEngine inputNode] outputFormatForBus:0];
    [[self.audioEngine inputNode] installTapOnBus:0 bufferSize:1024 format:recordingFormat block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when) {
        [self.recognitionRequest appendAudioPCMBuffer:buffer];
    }];
    [self.audioEngine prepare];
    [self.audioEngine startAndReturnError:nil];
    
    
    /// Level metering: a separate AVAudioRecorder is used only to read the input volume
    [[AVAudioSession sharedInstance]
     setCategory: AVAudioSessionCategoryPlayAndRecord error: nil];
    
    /// The recording itself is not needed, so write it to /dev/null
    NSURL *url = [NSURL fileURLWithPath:@"/dev/null"];
    
    NSDictionary *settings = @{AVSampleRateKey : @44100.0f,
                               AVFormatIDKey : @(kAudioFormatAppleLossless),
                               AVNumberOfChannelsKey : @2,
                               AVEncoderAudioQualityKey : @(AVAudioQualityMax)};
    
    NSError *error;
    _recorder = [[AVAudioRecorder alloc] initWithURL:url settings:settings error:&error];
    if (_recorder)
    {
        [_recorder prepareToRecord];
        _recorder.meteringEnabled = YES;
        [_recorder record];
        _levelTimer = [NSTimer scheduledTimerWithTimeInterval: 1 target: self selector: @selector(levelTimerCallback:) userInfo: nil repeats: YES];
    }
    else
    {
        NSLog(@"%@", [error description]);
    }

    
}
/// After voice input starts, a timer periodically samples the input level
- (void)levelTimerCallback:(NSTimer *)timer {
    [_recorder updateMeters];
    
    float   level;                // The linear 0.0 .. 1.0 value we need.
    float   minDecibels = -80.0f; // Or use -60dB, which I measured in a silent room.
    float   decibels    = [_recorder averagePowerForChannel:0];
    
    if (decibels < minDecibels)
    {
        level = 0.0f;
    }
    else if (decibels >= 0.0f)
    {
        level = 1.0f;
    }
    else
    {
        float   root            = 2.0f;
        float   minAmp          = powf(10.0f, 0.05f * minDecibels);
        float   inverseAmpRange = 1.0f / (1.0f - minAmp);
        float   amp             = powf(10.0f, 0.05f * decibels);
        float   adjAmp          = (amp - minAmp) * inverseAmpRange;
        
        level = powf(adjAmp, 1.0f / root);
    }
    
    /// level is in [0, 1]; scale it to [0, 120]
    /// and report it back to the delegate
    if ([self.delegate respondsToSelector:@selector(level:)]) {
        [self.delegate level:120 * level];
    }
}

- (void)stopR {
    [_levelTimer invalidate];
    _levelTimer = nil;
    [_recorder stop];
    [[self.audioEngine inputNode] removeTapOnBus:0];
    [self.audioEngine stop];
    [self.recognitionRequest endAudio];
    self.recognitionRequest = nil;
}
@end
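
A quick sanity check of the dB-to-linear mapping in levelTimerCallback: with minDecibels at -80 dB, minAmp = 10^(0.05 * -80) = 0.0001. An input of -20 dB gives amp = 10^(0.05 * -20) = 0.1, adjAmp ≈ (0.1 - 0.0001) / (1 - 0.0001) ≈ 0.0999, and level = sqrt(0.0999) ≈ 0.32, so the delegate receives roughly 0.32 * 120 ≈ 38. Quiet input stays near 0, loud input approaches 120, and the square root stretches the quiet end so normal speech still moves the animation.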

On the receiving side, just switch images based on the reported value, or skip the images and draw the small bars next to the microphone yourself (a sketch of the drawing approach follows the method below).

- (void)level:(float)value {
    if (value <= 0) {
        return;
    }
    if (value <= 10) {
        _voiceView.image = [UIImage imageNamed:@"v_1"];
    } else if (value <= 20) {
        _voiceView.image = [UIImage imageNamed:@"v_2"];
    } else if (value <= 25) {
        _voiceView.image = [UIImage imageNamed:@"v_3"];
    } else if (value <= 35) {
        _voiceView.image = [UIImage imageNamed:@"v_4"];
    } else if (value <= 45) {
        _voiceView.image = [UIImage imageNamed:@"v_5"];
    } else {
        _voiceView.image = [UIImage imageNamed:@"v_6"];
    }
}
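
If you would rather not ship six images, the same callback can drive a small view that draws the bars itself. Here is a rough sketch of such a view; the class name, bar metrics, and the even 20-point buckets are illustrative choices, not from the original post:

#import <UIKit/UIKit.h>

/// A hypothetical view that draws 1–6 volume bars next to the microphone icon.
@interface VoiceLevelView : UIView
/// Expected range 0–120, as reported by the level: delegate callback.
@property (nonatomic, assign) float level;
@end

@implementation VoiceLevelView

- (void)setLevel:(float)level {
    _level = level;
    [self setNeedsDisplay]; // redraw whenever a new level arrives
}

- (void)drawRect:(CGRect)rect {
    CGContextRef ctx = UIGraphicsGetCurrentContext();
    [[UIColor whiteColor] setFill];

    // Map the 0–120 level to 1–6 bars (even buckets here; tweak to taste).
    NSInteger bars = MAX(1, MIN(6, (NSInteger)ceilf(self.level / 20.0f)));
    CGFloat barWidth = 4.0f;
    CGFloat spacing = 3.0f;
    for (NSInteger i = 0; i < bars; i++) {
        CGFloat barHeight = 6.0f + i * 5.0f; // each bar a little taller than the last
        CGRect bar = CGRectMake(i * (barWidth + spacing),
                                CGRectGetHeight(rect) - barHeight,
                                barWidth,
                                barHeight);
        CGContextFillRect(ctx, bar);
    }
}
@end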

Here is the long-press handler:

- (void)longPress:(UILongPressGestureRecognizer *)gestureRecognizer{
    CGPoint point = [gestureRecognizer locationInView:self.view];
    if(gestureRecognizer.state == UIGestureRecognizerStateBegan) {
        [self startRecording];
    } else if(gestureRecognizer.state == UIGestureRecognizerStateEnded) {
        [self stopRecording];
        
    } else if(gestureRecognizer.state == UIGestureRecognizerStateChanged) {
        NSLog(@"y ========== %f", point.y);
        /// If the finger slides up past a threshold, cancel the recognition.
        /// The y value is hard-coded here for simplicity; in real code derive it from the layout.
        if (point.y < 513) {
            _cancel = @"yes";
            NSLog(@"voice cancel");
        }
    } else if (gestureRecognizer.state == UIGestureRecognizerStateFailed) {
        
    } else if (gestureRecognizer.state == UIGestureRecognizerStateCancelled) {
        
    }
}
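
The post doesn't show startRecording / stopRecording or how the gesture is attached. Assuming the controller owns a PBSpeechRecognizer, conforms to PBSpeechRecognizerProtocol, and exposes a record button, the wiring might look roughly like this (the recognizer and recordButton property names are assumptions):

- (void)viewDidLoad {
    [super viewDidLoad];
    self.recognizer = [[PBSpeechRecognizer alloc] init];
    self.recognizer.delegate = self; // receives recognitionSuccess:/recognitionFail:/level:
    UILongPressGestureRecognizer *longPress =
        [[UILongPressGestureRecognizer alloc] initWithTarget:self action:@selector(longPress:)];
    [self.recordButton addGestureRecognizer:longPress];
}

- (void)startRecording {
    _cancel = nil;               // reset the cancel flag for this press
    [self.recognizer startR];
}

- (void)stopRecording {
    [self.recognizer stopR];
    if ([_cancel isEqualToString:@"yes"]) {
        // The finger slid up past the threshold: discard this recognition.
        return;
    }
    // Otherwise recognitionSuccess: will deliver the final transcription.
}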

Of course, this is only a rough simulation and many details remain to be polished; it looks simple, but it is not.
