文章目录
ref:
语音活性检测器py-webrtcvad安装使用: https://www.cnblogs.com/zhenyuyaodidiao/p/9288455.html
代码及使用示例
创建虚拟环境,并安装webrtcvad
代码:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2021/10/26 上午9:57
# @Author : me
# @File : test_webrtcvad.py
# @Version :
# @Software: PyCharm
# @Description : 'This is ...'
import collections
import contextlib
import os
import sys
import wave
import webrtcvad
def read_wave(path):
    """Read a mono 16-bit PCM WAV file.

    Returns a ``(pcm_bytes, sample_rate)`` tuple. Asserts that the file is
    single-channel, 2 bytes per sample, and sampled at 8/16/32 kHz — the
    only formats webrtcvad accepts.
    """
    with contextlib.closing(wave.open(path, 'rb')) as wav_in:
        assert wav_in.getnchannels() == 1
        assert wav_in.getsampwidth() == 2
        rate = wav_in.getframerate()
        assert rate in (8000, 16000, 32000)
        data = wav_in.readframes(wav_in.getnframes())
    return data, rate
def write_wave(path, audio, sample_rate):
    """Write raw 16-bit mono PCM bytes to *path* as an uncompressed WAV."""
    with contextlib.closing(wave.open(path, 'wb')) as wav_out:
        # (nchannels, sampwidth, framerate, nframes, comptype, compname);
        # nframes=0 is patched by the wave module when the file is closed.
        wav_out.setparams((1, 2, sample_rate, 0, 'NONE', 'not compressed'))
        wav_out.writeframes(audio)
class Frame(object):
    """A fixed-duration slice of PCM audio.

    Attributes:
        bytes: raw PCM payload of the slice.
        timestamp: start offset of the slice, in seconds.
        duration: slice length, in seconds.
    """

    def __init__(self, bytes, timestamp, duration):
        self.bytes = bytes
        self.timestamp = timestamp
        self.duration = duration


def frame_generator(frame_duration_ms, audio, sample_rate):
    """Yield Frame objects of *frame_duration_ms* cut from 16-bit mono PCM.

    A trailing partial frame shorter than frame_duration_ms is dropped.
    """
    # Two bytes per sample (16-bit audio).
    frame_size = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
    frame_seconds = (float(frame_size) / sample_rate) / 2.0
    offset, clock = 0, 0.0
    while offset + frame_size < len(audio):
        yield Frame(audio[offset:offset + frame_size], clock, frame_seconds)
        clock += frame_seconds
        offset += frame_size
def vad_collector(sample_rate, frame_duration_ms,
                  padding_duration_ms, vad, frames):
    """Filter out non-voiced frames and yield voiced PCM segments.

    Args:
        sample_rate: audio sample rate in Hz.
        frame_duration_ms: duration of each frame in milliseconds.
        padding_duration_ms: padding window (ms) used to smooth per-frame
            VAD decisions before toggling the triggered state.
        vad: object with ``is_speech(bytes, sample_rate) -> bool``
            (a ``webrtcvad.Vad`` instance in this file).
        frames: iterable of Frame objects (see frame_generator).

    Yields:
        bytes: concatenated PCM data of each detected voiced segment.

    NOTE(review): the trailing ``return end_s`` only sets
    ``StopIteration.value``; callers iterating with a plain ``for`` loop
    (as main/test/chunk_speech do) never see it.
    """
    end_s = 0  # timestamp (s) at which the last voiced segment ended
    num_padding_frames = int(padding_duration_ms / frame_duration_ms)
    # Sliding window over the most recent frames.
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    # triggered == True while we are inside a voiced region.
    triggered = False
    voiced_frames = []
    for frame in frames:
        # Trace each frame's raw VAD decision: '1' voiced, '0' unvoiced.
        sys.stdout.write(
            '1' if vad.is_speech(frame.bytes, sample_rate) else '0')
        if not triggered:
            ring_buffer.append(frame)
            num_voiced = len([f for f in ring_buffer
                              if vad.is_speech(f.bytes, sample_rate)])
            # Enter the voiced state once >90% of the window is voiced.
            if num_voiced > 0.9 * ring_buffer.maxlen:
                sys.stdout.write('+(%s)' % (ring_buffer[0].timestamp,))
                triggered = True
                # Include the padding frames that preceded the trigger.
                voiced_frames.extend(ring_buffer)
                ring_buffer.clear()
        else:
            voiced_frames.append(frame)
            ring_buffer.append(frame)
            num_unvoiced = len([f for f in ring_buffer
                                if not vad.is_speech(f.bytes, sample_rate)])
            # Leave the voiced state once >90% of the window is unvoiced.
            if num_unvoiced > 0.9 * ring_buffer.maxlen:
                sys.stdout.write('-zd(%s)' % (frame.timestamp + frame.duration))
                end_s = frame.timestamp + frame.duration
                triggered = False
                yield b''.join([f.bytes for f in voiced_frames])  # todo
                ring_buffer.clear()
                voiced_frames = []
    if triggered:
        # Input ended while still voiced; report the end of the last frame.
        sys.stdout.write('--(%s)' % (frame.timestamp + frame.duration))
    sys.stdout.write('\n')
    if voiced_frames:
        # Flush the final (unterminated) voiced segment.
        yield b''.join([f.bytes for f in voiced_frames])  # todo
        pass
    return end_s
def main(args):
    """Command-line entry point.

    Expects ``args == [aggressiveness, wav_path]``; prints the per-frame
    VAD trace and one '--end' line per voiced segment. Exits with status 1
    on bad usage.
    """
    if len(args) != 2:
        sys.stderr.write(
            'Usage: example.py <aggressiveness> <path to wav file>\n')
        sys.exit(1)
    aggressiveness, wav_path = args
    audio, sample_rate = read_wave(wav_path)
    detector = webrtcvad.Vad(int(aggressiveness))
    all_frames = list(frame_generator(30, audio, sample_rate))
    for _segment in vad_collector(sample_rate, 30, 300, detector, all_frames):
        print('--end')
def test(wav):
    """Run VAD on one wav file; save each voiced chunk as chunk-NN.wav."""
    aggressiveness = 2
    audio, sample_rate = read_wave(wav)
    detector = webrtcvad.Vad(aggressiveness)
    all_frames = list(frame_generator(30, audio, sample_rate))
    for idx, chunk in enumerate(
            vad_collector(sample_rate, 30, 300, detector, all_frames)):
        out_path = 'chunk-%002d.wav' % (idx,)
        print('--end')
        write_wave(out_path, chunk, sample_rate)
def chunk_speech(wav_fl):
    """Split one wav file into voiced chunks named <wav_fl>-chunk-NN.wav."""
    aggressiveness = 2
    audio, sample_rate = read_wave(wav_fl)
    detector = webrtcvad.Vad(aggressiveness)
    all_frames = list(frame_generator(30, audio, sample_rate))
    for idx, chunk in enumerate(
            vad_collector(sample_rate, 30, 300, detector, all_frames)):
        out_path = wav_fl + '-chunk-%002d.wav' % (idx,)
        print('--end')
        write_wave(out_path, chunk, sample_rate)
def vad_folder(path):
    """Run chunk_speech on every .wav file directly inside *path*.

    Non-wav entries are skipped; subdirectories are not recursed into.
    Uses os.path.join instead of manual '/'-concatenation so the paths
    are portable across platforms.
    """
    for name in os.listdir(path):
        if name.endswith('.wav'):
            chunk_speech(os.path.join(path, name))
if __name__ == '__main__':
    # Alternate entry points, kept for reference:
    #   test('./hotword_0_train_0.wav')
    #   vad_folder('./')
    main(sys.argv[1:])
使用示例:
python test_webrtcvad.py 2 hotword_0_train_0.wav
其中2.07秒是语音开始处, 3.089秒是语音结束处:
基于webrtc的vad做语音截取
- 单个文件按照vad截取
def test(wav):
    """Run VAD on one wav file; save each voiced chunk as chunk-NN.wav."""
    aggressiveness = 2
    audio, sample_rate = read_wave(wav)
    detector = webrtcvad.Vad(aggressiveness)
    all_frames = list(frame_generator(30, audio, sample_rate))
    for idx, chunk in enumerate(
            vad_collector(sample_rate, 30, 300, detector, all_frames)):
        out_path = 'chunk-%002d.wav' % (idx,)
        print('--end')
        write_wave(out_path, chunk, sample_rate)
if __name__ == '__main__':
    # Single-file demo; the CLI entry point is disabled here.
    # main(sys.argv[1:])
    test('./hotword_0_train_0.wav')
生成的chunk-00.wav, 即按照start=2.07秒, end=3.089秒, 时长约为1.02秒, 进行截取的:
- 文件夹内所有文件的vad截取
def chunk_speech(wav_fl):
    """Split one wav file into voiced chunks named <wav_fl>-chunk-NN.wav."""
    aggressiveness = 2
    audio, sample_rate = read_wave(wav_fl)
    detector = webrtcvad.Vad(aggressiveness)
    all_frames = list(frame_generator(30, audio, sample_rate))
    for idx, chunk in enumerate(
            vad_collector(sample_rate, 30, 300, detector, all_frames)):
        out_path = wav_fl + '-chunk-%002d.wav' % (idx,)
        print('--end')
        write_wave(out_path, chunk, sample_rate)
def vad_folder(path):
    """Run chunk_speech on every .wav file directly inside *path*.

    Non-wav entries are skipped; subdirectories are not recursed into.
    Uses os.path.join instead of manual '/'-concatenation so the paths
    are portable across platforms.
    """
    for name in os.listdir(path):
        if name.endswith('.wav'):
            chunk_speech(os.path.join(path, name))
if __name__ == '__main__':
    # Batch mode: split every .wav in the current folder.
    # main(sys.argv[1:])
    # test('./hotword_0_train_0.wav')
    path = './'
    vad_folder(path)
当前文件夹:
python test_webrtcvad.py
运行后生成的截取文件: