Hands-on: real-time speech recognition in Java with Alibaba Cloud's one-sentence recognition (一句话识别)

First, a quick walk-through of the approach; with that alone, experienced readers can probably build this themselves. If you would rather not, the complete code is right below.

The implementation covers real-time voice capture, detection of whether any speech is actually coming in, and recognition, split into two sub-projects: one for capture and one for recognition.

1.) Real-time capture of local audio, detecting whether speech is present to decide when the recorder should go idle

package com.wqc.sound;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;

import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.TargetDataLine;

public class EngineeCore {

    String filePath = "E:\\1jAVE\\ECLIPSE\\obj\\nls-example-recognizer\\src\\main\\resources\\voice_cache.wav";

    AudioFormat audioFormat;
    TargetDataLine targetDataLine;
    boolean flag = true;


    private void stopRecognize() {
        flag = false;
        targetDataLine.stop();
        targetDataLine.close();
    }

    private AudioFormat getAudioFormat() {
        float sampleRate = 16000;
        // 8000,11025,16000,22050,44100
        int sampleSizeInBits = 16;
        // 8,16
        int channels = 1;
        // 1,2
        boolean signed = true;
        // true,false
        boolean bigEndian = false;
        // true,false
        return new AudioFormat(sampleRate, sampleSizeInBits, channels, signed, bigEndian);
    }// end getAudioFormat


    void startRecognize() {
        try {
            // Obtain the desired audio format
            audioFormat = getAudioFormat();
            DataLine.Info dataLineInfo = new DataLine.Info(TargetDataLine.class, audioFormat);
            targetDataLine = (TargetDataLine) AudioSystem.getLine(dataLineInfo);

            // Create a thread that captures microphone data into an audio file
            // and start it. It runs until silence is detected (or stopRecognize()
            // is called). This method returns immediately after starting the thread.
            flag = true;
            new CaptureThread().start();
        } catch (Exception e) {
            e.printStackTrace();
        } // end catch
    }// end startRecognize method

    class CaptureThread extends Thread {
        public void run() {
            File audioFile = new File(filePath);
            // Remove any stale recording from a previous run
            if (audioFile.exists()) {
                audioFile.delete();
            }
            // Amplitude threshold used to decide whether sound is coming in
            int weight = 2;
            // Counter of consecutive quiet reads, used to decide when to stop recording
            int downSum = 0;

            ByteArrayInputStream bais = null;
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            AudioInputStream ais = null;
            try {
                targetDataLine.open(audioFormat);
                targetDataLine.start();
                byte[] fragment = new byte[1024];

                ais = new AudioInputStream(targetDataLine);
                while (flag) {

                    targetDataLine.read(fragment, 0, fragment.length);
                    // Start buffering once the last byte of the fragment exceeds the threshold
                    // (sound is coming in); once buffering has started, keep every fragment.
                    if (Math.abs(fragment[fragment.length - 1]) > weight || baos.size() > 0) {
                        baos.write(fragment);
                        System.out.println("first: " + fragment[0] + ", last: " + fragment[fragment.length - 1] + ", length: " + fragment.length);
                        // Check whether the speech has stopped
                        if (Math.abs(fragment[fragment.length - 1]) <= weight) {
                            downSum++;
                        } else {
                            System.out.println("resetting quiet counter");
                            downSum = 0;
                        }
                        // More than 20 consecutive quiet reads means no sound came in for a while
                        // (the value can be tuned)
                        if (downSum > 20) {
                            System.out.println("stopping capture");
                            break;
                        }
                    }
                }

                // Wrap the captured bytes in an audio input stream
                audioFormat = getAudioFormat();
                byte[] audioData = baos.toByteArray();
                bais = new ByteArrayInputStream(audioData);
                ais = new AudioInputStream(bais, audioFormat, audioData.length / audioFormat.getFrameSize());
                // Write the buffered audio out as a WAV file
                System.out.println("writing the voice file");
                AudioSystem.write(ais, AudioFileFormat.Type.WAVE, audioFile);
                downSum = 0;
                stopRecognize();

            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                // Close the streams
                try {
                    if (ais != null) {
                        ais.close();
                    }
                    if (bais != null) {
                        bais.close();
                    }
                    baos.reset();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }

        }// end run
    }// end inner class CaptureThread
}
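
One thing worth noting before moving on: startRecognize() only spawns the capture thread and returns immediately, and EngineeCore exposes no completion callback, so a caller has to wait for voice_cache.wav to appear before handing it to the recognizer. Below is a minimal sketch of such a caller; the RecorderRunner class, the polling loop and the one-minute timeout are my own illustration (they assume the class sits in the same com.wqc.sound package, so the package-private members are visible) and are not part of the original project.

package com.wqc.sound;

import java.io.File;

public class RecorderRunner {

    public static void main(String[] args) throws InterruptedException {
        EngineeCore engineeCore = new EngineeCore();
        // Starts the capture thread and returns right away
        engineeCore.startRecognize();

        // The capture thread deletes any stale recording first, then writes a fresh
        // voice_cache.wav once it has detected silence, so polling for the file is a
        // crude but sufficient way to wait for it (there is a small race with the
        // initial delete; good enough for a demo).
        File wav = new File(engineeCore.filePath);
        long deadline = System.currentTimeMillis() + 60_000;
        while (!wav.exists() && System.currentTimeMillis() < deadline) {
            Thread.sleep(200);
        }
        System.out.println(wav.exists()
            ? "recording saved to " + wav.getAbsolutePath()
            : "timed out waiting for a recording");
    }
}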

2.) Next, the Alibaba Cloud speech-recognition client

package com.wqc.sound;

import java.io.File;
import java.io.InputStream;

import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

import com.alibaba.nls.client.protocol.InputFormatEnum;
import com.alibaba.nls.client.protocol.NlsClient;
import com.alibaba.nls.client.protocol.SampleRateEnum;
import com.alibaba.nls.client.protocol.asr.SpeechRecognizer;
import com.alibaba.nls.client.protocol.asr.SpeechRecognizerListener;
import com.alibaba.nls.client.protocol.asr.SpeechRecognizerResponse;

/**
 * @author zhishen.ml
 * @date 2018-06-12
 */
public class SpeechRecognizerDemo {
    private String appKey;
    private String accessToken;
    NlsClient client;

    public SpeechRecognizerDemo(String appKey, String token) {
        this.appKey = appKey;
        this.accessToken = token;
        // Create the NlsClient instance. One per application is enough; by default it connects to the Alibaba Cloud public endpoint.
        client = new NlsClient(accessToken);
    }

    public SpeechRecognizerDemo(String appKey, String token, String url) {
        this.appKey = appKey;
        this.accessToken = token;
        // Create the NlsClient instance. One per application is enough; this overload lets the caller specify the service URL.
        client = new NlsClient(url, accessToken);
    }

    private static SpeechRecognizerListener getRecognizerListener() {
        SpeechRecognizerListener listener = new SpeechRecognizerListener() {
            // Intermediate result: the server returns this message each time it recognizes a word or phrase.
            // Only sent when setEnableIntermediateResult(true) has been called.
            @Override
            public void onRecognitionResultChanged(SpeechRecognizerResponse response) {
                // Event name: RecognitionResultChanged
                System.out.println("[intermediate] name: " + response.getName() +
                    // Status code 20000000 means success
                    ", status: " + response.getStatus() +
                    // Recognized text so far
                    ", result: " + response.getRecognizedText());
            }

            // Recognition finished
            @Override
            public void onRecognitionCompleted(SpeechRecognizerResponse response) {
                // Event name: RecognitionCompleted
                System.out.println("[completed] name: " + response.getName() +
                    // Status code 20000000 means success
                    ", status: " + response.getStatus() +
                    // Final recognized text
                    ", result: " + response.getRecognizedText());
            }

            @Override
            public void onStarted(SpeechRecognizerResponse response) {
                System.out.println(
                    "task_id: " + response.getTaskId());
            }

            @Override
            public void onFail(SpeechRecognizerResponse response) {
                System.out.println(
                    "task_id: " + response.getTaskId() +
                        // Status code (20000000 would mean success)
                        ", status: " + response.getStatus() +
                        // Error message
                        ", status_text: " + response.getStatusText());

            }
        };
        return listener;
    }

    public void process(InputStream ins) {
        SpeechRecognizer recognizer = null;
        try {
            // Create the recognizer instance and establish the connection
            recognizer = new SpeechRecognizer(client, getRecognizerListener());
            recognizer.setAppKey(appKey);
            // Audio encoding format
            recognizer.setFormat(InputFormatEnum.PCM);
            // Audio sample rate
            recognizer.setSampleRate(SampleRateEnum.SAMPLE_RATE_16K);
            // Whether to return intermediate recognition results
            recognizer.setEnableIntermediateResult(true);

            // Serializes the settings above to JSON, sends them to the server and waits for confirmation
            recognizer.start();
            // When the audio comes from a file, use this overload to throttle the send rate;
            // for live audio there is no need to throttle, just call recognizer.send(ins)
            recognizer.send(ins, 3200, 100);
            // Tell the server that all audio has been sent, then wait for it to finish processing
            recognizer.stop();

        } catch (Exception e) {
            System.err.println(e.getMessage());
        } finally {
            // Close the connection
            if (null != recognizer) {
                recognizer.close();
            }
        }
    }

    public void shutdown() {
        client.shutdown();
    }

    public static void main(String[] args) throws Exception {
        String appKey = null;
        String token = null;
        String url = null;
        SpeechRecognizerDemo demo = null;

        // To recognize a live recording instead of the bundled sample, record first:
        // EngineeCore engineeCore = new EngineeCore();
        // engineeCore.startRecognize();
        String filePath = "E:\\1jAVE\\ECLIPSE\\obj\\nls-example-recognizer\\src\\main\\resources\\nls-sample-16k.wav";
        File audioFile = new File(filePath);

        if (args.length == 2) {
            appKey = args[0];
            token = args[1];
            // default url is wss://nls-gateway.cn-shanghai.aliyuncs.com/ws/v1
            demo = new SpeechRecognizerDemo(appKey, token);
        } else if (args.length == 3) {
            appKey = args[0];
            token = args[1];
            url = args[2];
            demo = new SpeechRecognizerDemo(appKey, token, url);
        } else {
            System.err.println("SpeechRecognizerDemo needs params (url is optional): " +
                "<app-key> <token> [<url>]");
            System.exit(-1);
        }

        AudioInputStream ins = AudioSystem.getAudioInputStream(audioFile);
        // InputStream ins = SpeechRecognizerDemo.class.getResourceAsStream("/voice_cache.wav");
        if (null == ins) {
            System.err.println("open the audio file failed!");
            System.exit(-1);
        }
        demo.process(ins);
        demo.shutdown();
    }

}

The two sub-projects above are glued together through a file on disk: the recorder saves the captured speech as an independent WAV file and the recognizer reads it back, as sketched below.
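
For completeness, here is what that end-to-end wiring can look like once a recording exists on disk (for instance one produced by the RecorderRunner sketch above). The RealTimeAsrDemo class name is mine, and the appKey/token still have to come from your own Alibaba Cloud NLS project; everything else reuses the two classes from this post. EngineeCore records exactly the 16 kHz, 16-bit mono PCM that process() configures, and AudioSystem.getAudioInputStream() strips the WAV header, so the recognizer receives plain PCM frames.

package com.wqc.sound;

import java.io.File;

import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class RealTimeAsrDemo {

    public static void main(String[] args) throws Exception {
        if (args.length < 2) {
            System.err.println("usage: RealTimeAsrDemo <app-key> <token>");
            System.exit(-1);
        }
        SpeechRecognizerDemo demo = new SpeechRecognizerDemo(args[0], args[1]);

        // The WAV file written by EngineeCore (same path as its filePath field)
        File wav = new File("E:\\1jAVE\\ECLIPSE\\obj\\nls-example-recognizer\\src\\main\\resources\\voice_cache.wav");

        // getAudioInputStream parses the WAV header and hands back the raw PCM frames,
        // which is what the recognizer expects (InputFormatEnum.PCM, SAMPLE_RATE_16K)
        AudioInputStream ins = AudioSystem.getAudioInputStream(wav);
        demo.process(ins);
        demo.shutdown();
    }
}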

Next, the pom file:

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>1.5.10.RELEASE</version>
        <relativePath/> <!-- look up the parent from the repository -->
    </parent>
    <groupId>com.wqc</groupId>
    <artifactId>aliAI-sound-spring</artifactId>
    <version>1.5.10.RELEASE</version>
    <properties>
        <java.version>1.8</java.version>
    </properties>
    <dependencies>
        <!-- Alibaba Cloud NLS speech recognizer SDK -->
        <dependency>
            <groupId>com.alibaba.nls</groupId>
            <artifactId>nls-sdk-recognizer</artifactId>
            <version>2.1.0</version>
        </dependency>
        <dependency>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-classic</artifactId>
            <version>1.0.13</version>
        </dependency>
        <!-- text-to-speech -->
        <dependency>
            <groupId>com.hynnet</groupId>
            <artifactId>jacob</artifactId>
            <version>1.18</version>
        </dependency>
        <!-- XML parsing -->
        <dependency>
            <groupId>jaxen</groupId>
            <artifactId>jaxen</artifactId>
            <version>1.1-beta-11</version>
            <exclusions>
                <exclusion>
                    <groupId>xerces</groupId>
                    <artifactId>xercesImpl</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>dom4j</groupId>
            <artifactId>dom4j</artifactId>
            <version>1.6.1</version>
        </dependency>

        <dependency>
            <groupId>com.ibm.icu</groupId>
            <artifactId>icu4j</artifactId>
            <version>3.8</version>
        </dependency>

        <dependency>
            <groupId>xerces</groupId>
            <artifactId>xmlParserAPIs</artifactId>
            <version>2.6.2</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-jar-plugin</artifactId>
                <version>2.3.1</version>
                <configuration>
                    <archive>
                        <manifest>
                            <addClasspath>true</addClasspath>
                        </manifest>
                        <manifestEntries>
                            <Premain-Class>
                                com.xzq.test.PreAgent
                            </Premain-Class>
                        </manifestEntries>
                    </archive>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>

Reposted from blog.csdn.net/qq_35128576/article/details/99544211