第三章 学会使用音频的小波变换系数进行训练

最近在做一维信号处理

加入到一维卷积里面总是会出现维度不匹配的问题,有些许崩溃,但是用tensorflow就没有这个问题。。。

下一步打算把小波变换的系数导入到EXCEL表格里面 具体的结构是文件名+小波系数+标签

进行分割数据集分割之后加入到神经网络里面进行训练并且进行可视化操作 具体参考b站大师兄

2022-7-9   ======今天终于在同学的帮助下完成了小波变换的训练=============

        之前遇见的问题一般都是输入数据维度不匹配的问题,一个是音频数据的channel一定要混合成1个channel。一维数据输入的时候输入神经网络的shape应该是(Batch_size, 1, datashape)。

以 x =(100,1,1600)为例:

保证数据维度的方法一共有两种,一个是torch.view(100,1,1600),另一种是torch.unsqueeze(x,1),注意这里的x一定是Tensor。

下面附上代码,这个模型是我自己参照其他博客的一维模型,训练效果不佳,还需要进一步的调整。

UrbanSoundDataset.py

# 创建自定义数据集
import os
import torch
from pywt import wavedec
from torch.utils.data import Dataset
import pandas as pd
import torchaudio
import numpy as np

class UrbanSoundDataset(Dataset):
    """Dataset yielding level-3 db1 wavelet coefficients of UrbanSound8K clips.

    Each item is (coefficients, label): the clip is resampled to
    ``target_sample_rate``, mixed down to mono, cut/zero-padded to exactly
    ``num_samples`` samples, then decomposed with a 3-level db1 wavelet
    transform whose coefficient arrays are concatenated into one flat vector.
    """

    def __init__(self,
                 annotations_file,
                 audio_dir,
                 target_sample_rate,
                 num_samples,
                 device):
        # annotations_file: CSV metadata, one row per clip (UrbanSound8K layout).
        self.annotations = pd.read_csv(annotations_file)
        self.audio_dir = audio_dir
        # NOTE(review): `device` is stored but never used in this class —
        # tensors are returned on CPU. Confirm whether .to(device) was intended.
        self.device = device
        self.target_sample_rate = target_sample_rate
        self.num_samples = num_samples

    def __len__(self):
        # One sample per metadata row.
        return len(self.annotations)

    def __getitem__(self, index):
        """Load clip *index*, normalise it to (1, num_samples), return
        (wavelet_coefficients, label)."""
        audio_sample_path = self._get_audio_sample_path(index)
        label = self._get_audio_sample_label(index)
        signal, sr = torchaudio.load(audio_sample_path)

        # Audio may be multi-channel:
        # signal -> (num_channels, samples), e.g. (2, 16000) -> (1, 16000)
        signal = self._resample_if_necessary(signal, sr)
        signal = self._mix_down_if_necessary(signal)
        signal = self._cut_if_necessary(signal)
        signal = self._right_pad_if_necessary(signal)
        signal = self.wavelet_trans(signal)

        return signal, label

    def wavelet_trans(self, signal):
        """Return all level-3 db1 wavelet coefficients of *signal* concatenated
        into one flat numpy array, approximation first: [cA3, cD3, cD2, cD1]."""
        coeffs = wavedec(signal, 'db1', level=3)
        # Single concatenate instead of repeated np.append, which re-copies the
        # accumulated array on every call (quadratic in total length).
        # .ravel() reproduces np.append's implicit flattening of each block.
        return np.concatenate([c.ravel() for c in coeffs])

    def _cut_if_necessary(self, signal):
        # Keep only the first num_samples samples (e.g. the first 0.1 s).
        if signal.shape[1] > self.num_samples:
            signal = signal[:, :self.num_samples]
        return signal

    def _right_pad_if_necessary(self, signal):
        # Zero-pad on the right so every clip has exactly num_samples samples.
        length_signal = signal.shape[1]
        if length_signal < self.num_samples:
            num_missing_samples = self.num_samples - length_signal
            # (left, right) pad counts for the last dimension only.
            last_dim_padding = (0, num_missing_samples)
            signal = torch.nn.functional.pad(signal, last_dim_padding)
        return signal

    def _resample_if_necessary(self, signal, sr):
        # Resample to the target rate only when the source rate differs.
        if sr != self.target_sample_rate:
            resampler = torchaudio.transforms.Resample(sr, self.target_sample_rate)
            signal = resampler(signal)
        return signal

    def _mix_down_if_necessary(self, signal):
        # Collapse multiple channels into one by averaging: (2, n) -> (1, n).
        if signal.shape[0] > 1:
            signal = torch.mean(signal, dim=0, keepdim=True)
        return signal

    def _get_audio_sample_path(self, index):
        # Path is <audio_dir>/fold<k>/<filename>; columns 5 and 0 hold the
        # fold number and file name (UrbanSound8K metadata layout — verify).
        fold = f"fold{self.annotations.iloc[index, 5]}"
        path = os.path.join(self.audio_dir, fold, self.annotations.iloc[index, 0])
        return path

    def _get_audio_sample_label(self, index):
        # Column 6 is assumed to be the integer class id — verify against CSV.
        return self.annotations.iloc[index, 6]

if __name__ == "__main__":
    # Smoke test: build the dataset and pull one sample.
    ANNOTATIONS_FILE = "UrbanSound8K/metadata/UrbanSound8K.csv"
    AUDIO_DIR = "audio"
    SAMPLE_RATE = 16000  # target sample rate (Hz)
    NUM_SAMPLES = 1600   # samples kept per clip (0.1 s at 16 kHz)

    # Prefer the GPU when one is available.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using device {device}")  # bug fix: message read "Using devie"

    usd = UrbanSoundDataset(ANNOTATIONS_FILE,
                            AUDIO_DIR,
                            SAMPLE_RATE,
                            NUM_SAMPLES,
                            device)
    print(f"There are {len(usd)} samples in the dataset.")
    signal, label = usd[0]

LetNet.py

扫描二维码关注公众号,回复: 14753622 查看本文章
import torch
from torchsummary import summary

class LeNet(torch.nn.Module):
    """LeNet-style 1-D CNN for fixed-length signal classification.

    Input: any tensor reshapeable to (batch, input_channels,
    input_sample_points), e.g. (batch, samples) straight from a DataLoader.
    Output: (batch, classes) raw logits — feed them to CrossEntropyLoss.
    """

    def __init__(self, input_channels, input_sample_points, classes):
        super(LeNet, self).__init__()

        self.input_channels = input_channels
        self.input_sample_points = input_sample_points

        self.features = torch.nn.Sequential(
            torch.nn.Conv1d(input_channels, 20, kernel_size=5),
            torch.nn.BatchNorm1d(20),
            torch.nn.MaxPool1d(2),
            torch.nn.Conv1d(20, 50, kernel_size=5),
            torch.nn.BatchNorm1d(50),
            torch.nn.MaxPool1d(2),
        )

        self.After_features_channels = 50
        # Samples left after two (conv k=5 -> pool 2) stages: each conv
        # removes 4 samples, each pooling halves the length.
        self.After_features_sample_points = ((input_sample_points - 4) // 2 - 4) // 2

        self.classifier = torch.nn.Sequential(
            torch.nn.Linear(self.After_features_channels * self.After_features_sample_points, 512),
            torch.nn.ReLU(),
            torch.nn.Linear(512, classes),
        )
        # Kept for callers that want probabilities explicitly; NOT applied in
        # forward() because CrossEntropyLoss expects raw logits.
        self.softmax = torch.nn.Softmax(dim=1)

    def forward(self, x):
        # Bug fix: the original hard-coded x.view(100, 1, 1600), which raised
        # for every batch size other than 100. Infer the batch dim instead.
        x = x.view(-1, self.input_channels, self.input_sample_points)

        # Defensive check of the (now reshaped) input dimensions.
        if x.size(1) != self.input_channels or x.size(2) != self.input_sample_points:
            raise Exception(
                '输入数据维度错误,输入维度应为[Batch_size,{},{}],实际输入维度为{}'.format(
                    self.input_channels, self.input_sample_points, x.size()))

        x = self.features(x)
        # Flatten (batch, channels, samples) for the fully-connected head.
        x = x.view(-1, self.After_features_channels * self.After_features_sample_points)
        # Bug fix: the original returned softmax(logits) and train.py then fed
        # that to CrossEntropyLoss, which applies log-softmax itself — a
        # double softmax that flattens gradients. Return logits directly.
        logits = self.classifier(x)
        return logits


if __name__ == '__main__':
   # Smoke test for the model defined above.
   model = LeNet(input_channels=1, input_sample_points=1600, classes=4)
   # NOTE(review): this tensor holds 10*1*1*1600 = 16 000 elements, but
   # forward() calls x.view(100, 1, 1600), which needs 160 000 — as written
   # this script raises a RuntimeError. Confirm the intended batch shape.
   input = torch.randn(size=(10, 1, 1, 1600))
   output = model(input)
   print(output.shape)
   #torch.Size([1, 5])
   # torchsummary prepends a batch dimension of 2 to input_size.
   summary(model=model, input_size=(1, 1, 1600), device='cpu')

train.py

import torch
import torchaudio
from torchsummary import summary
from torch import nn
from torch.utils.data import DataLoader
from UrbanSoundDataset import UrbanSoundDataset
from LetNet import LeNet

# Training hyper-parameters and dataset locations.
BATCH_SIZE = 100
Epochs = 50
LEARNING_RATE = 0.01
ANNOTATIONS_FILE = "UrbanSound8K/metadata/UrbanSound8K.csv"
AUDIO_DIR = "audio"
SAMPLE_RATE = 16000  # target sample rate (Hz)
NUM_SAMPLES = 1600  # samples kept per clip (0.1 s at 16 kHz)

def create_data_loader(train_data, batch_size):
    """Wrap *train_data* in a DataLoader.

    drop_last=True discards any final incomplete batch, so every batch the
    model sees has exactly *batch_size* samples.
    """
    return DataLoader(train_data, batch_size=batch_size, drop_last=True)



def train_single_epoch(model, data_loader, loss_fn, optimiser, device):
    """Run one optimisation pass over *data_loader*.

    Prints the loss of the final batch and the accuracy over all samples
    actually seen this epoch.
    """
    correct = 0
    # Bug fix: accuracy was divided by a hard-coded 8730, which is wrong for
    # any other dataset size or split. Count the samples actually processed.
    total = 0
    loss = None
    for inputs, targets in data_loader:  # renamed: `input` shadows the builtin
        inputs, targets = inputs.to(device), targets.to(device)

        # Forward pass and per-batch loss.
        predictions = model(inputs)
        loss = loss_fn(predictions, targets)

        # Predicted class = arg-max over the class dimension.
        predicted = torch.max(predictions.data, 1)[1]
        correct += (predicted == targets).sum()
        total += targets.size(0)

        # Backpropagate and update weights; gradients are reset every batch.
        optimiser.zero_grad()
        loss.backward()
        optimiser.step()

    if loss is not None:  # guard against an empty data_loader
        print(f"Loss: {loss.item()}")  # loss of the final batch only
        print(f"Accuracy:{correct/total}")

def train(model, data_loader, loss_fn, optimiser, device, epochs):
    """Train *model* for *epochs* full passes over *data_loader*."""
    for epoch in range(epochs):
        print(f"Epoch {epoch + 1}")
        train_single_epoch(model, data_loader, loss_fn, optimiser, device)
        print("---------------------")
    print("Training is done.")  # bug fix: message read "Training is down."




if __name__ == "__main__":
    # Pick the fastest available device; everything below respects it.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Dataset of wavelet coefficients + labels (see UrbanSoundDataset.py).
    usd = UrbanSoundDataset(ANNOTATIONS_FILE,
                            AUDIO_DIR,
                            SAMPLE_RATE,
                            NUM_SAMPLES,
                            device)

    train_dataloader = create_data_loader(usd, BATCH_SIZE)

    # Build the model on the selected device.
    # Bug fix: the original called .cuda() unconditionally, crashing on
    # CPU-only machines even though `device` was computed above.
    cnn = LeNet(input_channels=1, input_sample_points=1600, classes=10).to(device)

    # Loss function + optimiser.
    loss_fn = nn.CrossEntropyLoss()
    optimiser = torch.optim.Adam(cnn.parameters(),
                                 lr=LEARNING_RATE)

    # Train, then persist the learned weights.
    train(cnn, train_dataloader, loss_fn, optimiser, device, Epochs)

    torch.save(cnn.state_dict(), "cnnnet_wavelet_urbansound.pth")
    print("Model trained and stored at cnnnet_wavelet_urbansound.pth")

将小波变换系数放到csv文件里面

import xlwt
import numpy as np
import os
import librosa
from pywt import wavedec
import csv

path = r'D:\python\Urbansoun8k_wavelet\audio\fold1'  # folder holding the clips
files = os.listdir(path)  # every file name in that folder

# Write one CSV row per clip: its level-3 db1 wavelet coefficients,
# concatenated approximation-first ([cA3, cD3, cD2, cD1]).
# Bug fix: `with` guarantees the file is closed even if a clip fails to load
# (the original left the handle open on any exception).
with open('taobao.csv', 'w', encoding='utf-8', newline="") as f:
    csv_writer = csv.writer(f)
    for name in files:
        # Load only the first 0.1 s at 16 kHz -> 1600 samples per clip.
        signal, sr = librosa.load(os.path.join(path, name), sr=16000, duration=0.1)
        coeffs = wavedec(signal, 'db1', level=3)
        # Single concatenate instead of repeated np.append (quadratic copying).
        row = np.concatenate(coeffs).tolist()
        csv_writer.writerow(row)
print("over")

猜你喜欢

转载自blog.csdn.net/weixin_48983346/article/details/125665507