Alibaba Tianchi Competition: Food Sound Recognition

Recently, while taking breaks from writing my thesis, I entered another Alibaba Tianchi competition. I had only done CV before, and this was my first attempt at audio recognition, so I am documenting the process here.
Strategy:
1. Mel spectrogram features, MFCC features, and a mix of both
2. Testing multiple models
If you want to try this project yourself, see the links:
Alibaba Tianchi competition
My code link
Link: https://pan.baidu.com/s/1pX21kMX901O7QKcb-m-B6g
Access code: fasf
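
The two feature types behind strategy 1 are both computed with librosa and collapsed to a fixed-length vector by averaging over time, so clips of different lengths become comparable. A minimal sketch (the file name is a placeholder, not part of the dataset):

import numpy as np
import librosa

y, sr = librosa.load("example.wav", res_type='kaiser_fast')  # placeholder path
mfcc = np.mean(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=128).T, axis=0)  # MFCC vector, shape (128,)
mel = np.mean(librosa.feature.melspectrogram(y=y, sr=sr).T, axis=0)     # mel spectrogram vector, shape (128,)
print(mfcc.shape, mel.shape)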

# Basic libraries
import os
import glob

import pandas as pd
import numpy as np
from tqdm import tqdm

import librosa

from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold

# Libraries for building the classification models
from keras.models import Sequential, Model
from keras.layers import Conv2D, Flatten, Dense, MaxPool2D, Dropout, LSTM, \
    BatchNormalization, Input, Conv1D, GlobalAveragePooling1D, concatenate, \
    MaxPool1D, Reshape, RepeatVector, Multiply
from keras.utils import to_categorical
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping, CSVLogger, TensorBoard
from keras import optimizers



# Map each class name to a numeric label.
label_dict = {'aloe': 0, 'burger': 1, 'cabbage': 2, 'candied_fruits': 3, 'carrots': 4,
              'chips': 5, 'chocolate': 6, 'drinks': 7, 'fries': 8, 'grapes': 9,
              'gummies': 10, 'ice-cream': 11, 'jelly': 12, 'noodles': 13, 'pickles': 14,
              'pizza': 15, 'ribs': 16, 'salmon': 17, 'soup': 18, 'wings': 19}
label_dict_inv = {v: k for k, v in label_dict.items()}


def extract_features(parent_dir, sub_dirs, max_file=10, file_ext="*.wav", flag="mix"):
    label, feature = [], []
    for sub_dir in sub_dirs:
        for fn in tqdm(glob.glob(os.path.join(parent_dir, sub_dir, file_ext))[:max_file]):  # iterate over every file in the dataset
            label_name = fn.split('/')[-1].split('\\')[0]  # Windows-style paths; on Linux use fn.split('/')[-2]
            label.extend([label_dict[label_name]])
            X, sample_rate = librosa.load(fn, res_type='kaiser_fast')
            if flag == "mfcc":
                # MFCC (mel-frequency cepstral coefficients), averaged over time
                mfcc = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=128).T, axis=0)
                feature.append(mfcc)
            elif flag == "mix":
                # mixed feature: 128 MFCCs + 100 zero-crossing flags (samples 9000:9100) + 12 chroma bins
                n0, n1 = 9000, 9100
                mfcc = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=128).T, axis=0)
                zero_crossings = librosa.zero_crossings(X[n0:n1], pad=False)
                temp = np.hstack((mfcc, zero_crossings))
                hop_length = 512
                chromagram = np.mean(librosa.feature.chroma_stft(y=X, sr=sample_rate, hop_length=hop_length).T, axis=0)
                temp = np.hstack((temp, chromagram))
                feature.append(temp)
            else:
                # mel spectrogram, averaged over time, as the feature
                mels = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)
                feature.extend([mels])

    # return the features and their labels
    return [feature, label]


# Change these directories to match your own setup
# parent_dir = './train_sample/'
parent_dir = './train/'
save_dir = "./"
folds = sub_dirs = np.array(['aloe','burger','cabbage','candied_fruits',
                             'carrots','chips','chocolate','drinks','fries',
                             'grapes','gummies','ice-cream','jelly','noodles','pickles',
                             'pizza','ribs','salmon','soup','wings'])

# Extract the features and labels
# temp = extract_features(parent_dir, sub_dirs, max_file=1000)
# # features, labels = extract_features(parent_dir,sub_dirs,max_file=100)
# temp = np.array(temp)
# data = temp.transpose()
# # features
# X = np.vstack(data[:, 0])
# # labels
# Y = np.array(data[:, 1])
# print('X shape:', X.shape)
# print('Y shape:', Y.shape)

# # In Keras, to_categorical converts a class vector into a one-hot (0/1) matrix
# Y = to_categorical(Y)

# '''final data'''
# print(X.shape)
# print(Y.shape)
# np.save('features', X)
# np.save('label', Y)
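
train() below expects features_mfcc.npy and label_mfcc.npy on disk. A minimal sketch of producing them with the "mfcc" flag (an uncommented variant of the block above, assuming parent_dir and sub_dirs as defined):

# Build and cache the MFCC feature files that train() loads.
features, labels = extract_features(parent_dir, sub_dirs, max_file=1000, flag="mfcc")
X = np.vstack(features)               # (n_samples, 128)
Y = to_categorical(np.array(labels))  # one-hot labels, (n_samples, 20)
np.save('features_mfcc', X)
np.save('label_mfcc', Y)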

def dnn(cnn_shape=(16, 8, 1), lstm_shape=(128, 1)):
    cnn_input = Input(shape=cnn_shape, name='cnn_input')
    lstm_input = Input(shape=lstm_shape, name='lstm_input')

    # LSTM branch produces a softmax "gate" over 64 channels
    x = LSTM(64, return_sequences=False)(lstm_input)
    x = Dense(64, activation='softmax')(x)

    # Conv1D branch, re-weighted channel-wise by the LSTM gate
    y1 = Conv1D(64, 5, padding='same', activation='relu')(lstm_input)
    y1 = MaxPool1D(pool_size=3)(y1)
    dim_num = int(y1.shape[1])
    x = RepeatVector(dim_num)(x)
    y1 = Multiply()([y1, x])

    # 2D CNN branch over the (16, 8, 1) view of the same feature vector
    y = Conv2D(64, (3, 3), padding="same", activation="relu")(cnn_input)
    y = BatchNormalization()(y)
    y = MaxPool2D(pool_size=(2, 2))(y)
    y = Dropout(0.15)(y)
    y = Conv2D(128, (3, 3), padding="same", activation="relu")(y)
    y = BatchNormalization()(y)
    y = MaxPool2D(pool_size=(2, 2))(y)
    y = Dropout(0.3)(y)
    y = Conv2D(128, (3, 3), padding="same", activation="relu")(y)
    y = BatchNormalization()(y)
    y = Dropout(0.2)(y)
    y = Reshape((-1, 64))(y)

    output = concatenate([y, y1], axis=1)
    output = GlobalAveragePooling1D()(output)
    # output = Flatten()(y)
    # output = Dense(1024, activation='relu')(output)
    output = Dense(20, activation="softmax")(output)
    model = Model(inputs=[cnn_input, lstm_input], outputs=output)
    model.summary()
    optimizer = optimizers.Adam(lr=0.001)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
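
A quick shape walk-through of dnn() with the default input sizes (batch dimension omitted), showing how the two branches end up concatenable on axis 1:

# lstm branch: (128, 1) -Conv1D(64)-> (128, 64) -MaxPool1D(3)-> (42, 64)
# gate:        (128, 1) -LSTM(64)-> (64,) -Dense(softmax)-> (64,)
#              -RepeatVector(42)-> (42, 64) -Multiply-> (42, 64)
# cnn branch:  (16, 8, 1) -> two pooled conv blocks -> (4, 2, 128) -Reshape-> (16, 64)
# concatenate on axis 1 -> (58, 64) -GlobalAveragePooling1D-> (64,) -Dense-> (20,)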

def cnn(input_shape=(16, 8, 1)):
    model_conv = Sequential()
    model_conv.add(Conv2D(64, (5, 5), padding="same", activation="relu", input_shape=input_shape))  # conv layer
    model_conv.add(MaxPool2D(pool_size=(2, 2)))  # max pooling
    model_conv.add(Conv2D(128, (3, 3), padding="same", activation="relu"))  # conv layer
    model_conv.add(MaxPool2D(pool_size=(2, 2)))  # max pooling
    model_conv.add(Dropout(0.1))
    model_conv.add(Flatten())  # flatten
    model_conv.add(Dense(1024, activation="relu"))
    model_conv.add(Dense(100, activation='relu'))
    model_conv.add(Dense(20, activation="softmax"))  # output layer: 20 units, one probability per class
    optimizer = optimizers.Adam(lr=0.001)
    model_conv.compile(optimizer=optimizer,
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])
    model_conv.summary()
    return model_conv
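
Both models consume the same 128-dim feature vector; the CNN simply views it as a 16x8 single-channel "image", while the sequence branch of dnn() views it as a length-128 sequence (see the reshape calls in train() below). A tiny illustration with fake data:

import numpy as np

x = np.random.rand(4, 128)             # 4 fake feature vectors, for illustration
print(x.reshape(-1, 16, 8, 1).shape)   # (4, 16, 8, 1): CNN view
print(x.reshape(-1, 128, 1).shape)     # (4, 128, 1): LSTM/Conv1D view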


def train(model=cnn, nf=True, feature_path="features_mfcc.npy", label_path="label_mfcc.npy"):
    X = np.load(feature_path)
    Y = np.load(label_path)

    # Callbacks, shared by both branches below (the original defined these only
    # inside the k-fold branch, which raised a NameError in the hold-out branch).
    checkpoint = ModelCheckpoint("./record/weight/dnn_mfcc-ep{epoch:03d}-loss{loss:.3f}-val_acc{val_acc:.3f}.h5",
                                 monitor="val_acc",
                                 verbose=1, save_best_only=True,
                                 mode='max')
    reduce_lr = ReduceLROnPlateau(monitor='val_acc',
                                  patience=100, mode='auto',
                                  factor=0.1, cooldown=0,
                                  min_lr=1e-5,
                                  verbose=1)
    csvlogger = CSVLogger(filename='./record/log/train.csv', append=True)
    earlystopping = EarlyStopping(monitor='val_acc',
                                  min_delta=0,
                                  patience=100,
                                  verbose=1,
                                  mode='max')
    tensorboard = TensorBoard(log_dir="./record/log/")
    callbacks = [checkpoint, reduce_lr, csvlogger, earlystopping, tensorboard]

    if nf:
        nfold = 5
        kf = KFold(n_splits=nfold, shuffle=True, random_state=2020)

        for train_index, valid_index in kf.split(X, Y):
            train_x, val_x = X[train_index], X[valid_index]
            train_y, val_y = Y[train_index], Y[valid_index]

            # train the model; the single-input model is the plain CNN
            if len(model.inputs) == 1:
                train_x = train_x.reshape(-1, 16, 8, 1)
                val_x = val_x.reshape(-1, 16, 8, 1)
                model.fit(train_x, train_y,
                          epochs=500,
                          batch_size=128,
                          validation_data=(val_x, val_y),
                          callbacks=callbacks)
            else:
                X_train = train_x.reshape(-1, 16, 8, 1)
                X_val = val_x.reshape(-1, 16, 8, 1)
                lstm_input = train_x.reshape(-1, 128, 1)
                lstm_val = val_x.reshape(-1, 128, 1)
                model.fit({'cnn_input': X_train, 'lstm_input': lstm_input},
                          train_y,
                          epochs=500,
                          batch_size=128,
                          validation_data=({'cnn_input': X_val, 'lstm_input': lstm_val}, val_y),
                          callbacks=callbacks)

    else:
        # single stratified hold-out split instead of k-fold
        train_x, val_x, train_y, val_y = train_test_split(X, Y, random_state=1, stratify=Y)
        if len(model.inputs) == 1:
            train_x = train_x.reshape(-1, 16, 8, 1)
            val_x = val_x.reshape(-1, 16, 8, 1)
            model.fit(train_x, train_y,
                      epochs=500,
                      batch_size=128,
                      validation_data=(val_x, val_y),
                      callbacks=callbacks)
        else:
            X_train = train_x.reshape(-1, 16, 8, 1)
            X_val = val_x.reshape(-1, 16, 8, 1)
            lstm_input = train_x.reshape(-1, 128, 1)
            lstm_val = val_x.reshape(-1, 128, 1)
            model.fit({'cnn_input': X_train, 'lstm_input': lstm_input},
                      train_y,
                      epochs=1000, batch_size=128,
                      validation_data=({'cnn_input': X_val, 'lstm_input': lstm_val}, val_y),
                      callbacks=callbacks)
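
A usage sketch for train() (assumes the cached .npy files exist; note that newer Keras versions log 'val_accuracy' instead of 'val_acc', so the monitor names in the callbacks may need adjusting):

# Hypothetical invocation: 5-fold training of the two-branch model.
model = dnn(cnn_shape=(16, 8, 1), lstm_shape=(128, 1))
train(model=model, nf=True, feature_path="features_mfcc.npy", label_path="label_mfcc.npy")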


def extract_test_features(test_dir, file_ext="*.wav", flag="mix"):
    feature = []
    for fn in tqdm(glob.glob(os.path.join(test_dir, file_ext))):  # iterate over every file in the test set
        X, sample_rate = librosa.load(fn, res_type='kaiser_fast')
        if flag == "mfcc":
            # MFCC, averaged over time
            mfcc = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=128).T, axis=0)
            feature.append(mfcc)
        elif flag == "mix":
            # same mixed feature as in extract_features above
            n0, n1 = 9000, 9100
            mfcc = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=128).T, axis=0)
            zero_crossings = librosa.zero_crossings(X[n0:n1], pad=False)
            temp = np.hstack((mfcc, zero_crossings))
            hop_length = 512
            chromagram = np.mean(librosa.feature.chroma_stft(y=X, sr=sample_rate, hop_length=hop_length).T, axis=0)
            temp = np.hstack((temp, chromagram))
            feature.append(temp)
        else:
            # mel spectrogram, averaged over time
            mels = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)
            feature.extend([mels])
    return feature


def voting(preds_conv, preds_dense, preds_lstm):
    # Hard voting: dividing each row by its max and flooring (//) turns the
    # probability matrix into a one-hot vote for that model's argmax class.
    prob_max = np.tile(np.max(preds_conv, axis=1).reshape(-1, 1), preds_conv.shape[1])
    preds_c = preds_conv // prob_max
    prob_max = np.tile(np.max(preds_dense, axis=1).reshape(-1, 1), preds_dense.shape[1])
    preds_d = preds_dense // prob_max
    prob_max = np.tile(np.max(preds_lstm, axis=1).reshape(-1, 1), preds_lstm.shape[1])
    preds_l = preds_lstm // prob_max
    # Sum the one-hot votes and take the class with the most votes.
    result_voting = preds_c + preds_d + preds_l
    preds_voting = np.argmax(result_voting, axis=1)
    return preds_voting
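
To see why the floor-division trick yields one-hot votes, here is a tiny worked example with synthetic probabilities (not competition outputs):

import numpy as np

p = np.array([[0.7, 0.2, 0.1],
              [0.1, 0.3, 0.6]])
m = np.tile(np.max(p, axis=1).reshape(-1, 1), p.shape[1])
print(p // m)  # [[1. 0. 0.], [0. 0. 1.]]: 1 only at each row's argmax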



def mul_test(cnn_model, dnn_model, cnn_weight, dnn_weight, test_path='./test_a/'):

    X_test = extract_test_features(test_path, flag="mfcc")
    X_test = np.vstack(X_test)
    cnn_model.load_weights(cnn_weight)
    preds_cnn = cnn_model.predict(X_test.reshape(-1, 16, 8, 1))
    dnn_model.load_weights(dnn_weight)
    x_test = X_test.reshape(-1, 16, 8, 1)
    lstm_test = X_test.reshape(-1, 128, 1)
    preds_dnn = dnn_model.predict({'cnn_input': x_test, 'lstm_input': lstm_test})
    # the dnn predictions get two votes here
    preds = voting(preds_cnn, preds_dnn, preds_dnn)
    preds = [label_dict_inv[x] for x in preds]

    path = glob.glob(os.path.join(test_path, '*.wav'))
    result = pd.DataFrame({'name': path, 'label': preds})

    result['name'] = result['name'].apply(lambda x: x.split('\\')[-1])
    result.to_csv('submit4.csv', index=None)


def single_test(model, weight, test_path='./test_a/'):
    X_test = extract_test_features(test_path, flag="mfcc")
    X_test = np.vstack(X_test)
    model.load_weights(weight)
    x_test = X_test.reshape(-1, 16, 8, 1)
    lstm_test = X_test.reshape(-1, 128, 1)
    # two-input (dnn) model; for the plain CNN use model.predict(x_test) instead
    predictions = model.predict({'cnn_input': x_test, 'lstm_input': lstm_test})
    # predictions = model.predict(x_test)

    preds = np.argmax(predictions, axis=1)
    preds = [label_dict_inv[x] for x in preds]
    path = glob.glob(os.path.join(test_path, '*.wav'))
    result = pd.DataFrame({'name': path, 'label': preds})
    result['name'] = result['name'].apply(lambda x: x.split('\\')[-1])
    result.to_csv('submit3.csv', index=None)


if __name__ == "__main__":
    dnn_model = dnn(cnn_shape=(16, 8, 1), lstm_shape=(128, 1))
    cnn_model = cnn(input_shape=(16, 8, 1))
    # train(model=dnn_model, nf=True, feature_path="features_mfcc.npy", label_path="label_mfcc.npy")
    cnn_weight = "./record/weight/cnn_mfcc-ep001-loss0.000-val_acc1.000.h5"
    dnn_weight = "./record/weight/dnn_mfcc-ep001-loss0.003-val_acc1.000.h5"
    mul_test(cnn_model, dnn_model, cnn_weight, dnn_weight)
    # single_test(dnn_model, dnn_weight)


Reposted from blog.csdn.net/CharmsLUO/article/details/123264298