Training + Testing + Results Visualization

Self-designed network CopyNet

copynet.py

import torch
from torchsummary import summary


class CopyNet(torch.nn.Module):
    """1-D CNN classifier for fixed-length signals.

    Three Conv1d -> BatchNorm -> ReLU -> MaxPool stages followed by a single
    linear layer. With input_sample_points=1600 the feature extractor yields
    64 channels x 24 samples = 1536 features, matching the Linear layer.

    Args:
        input_channels: number of channels of each input signal.
        input_sample_points: number of samples per signal (1600 for the
            hard-wired Linear(1536, classes) to be shape-correct).
        classes: number of output classes.

    forward() returns raw logits of shape (batch, classes).
    BUG FIX: the original applied Softmax inside forward(), but the training
    script feeds the output to nn.CrossEntropyLoss, which applies log-softmax
    itself — the double softmax flattens gradients and cripples training.
    Logits are returned instead; ``self.softmax`` is kept for callers that
    want explicit probabilities.
    """

    def __init__(self, input_channels, input_sample_points, classes):
        super(CopyNet, self).__init__()

        self.input_channels = input_channels
        self.input_sample_points = input_sample_points

        self.features = torch.nn.Sequential(
            torch.nn.Conv1d(input_channels, 16, kernel_size=4, stride=1, padding=0),
            torch.nn.BatchNorm1d(16),
            torch.nn.ReLU(inplace=True),
            torch.nn.MaxPool1d(kernel_size=4, stride=4),

            torch.nn.Conv1d(16, 32, kernel_size=4, padding=0),
            torch.nn.BatchNorm1d(32),
            torch.nn.ReLU(inplace=True),
            torch.nn.MaxPool1d(kernel_size=4, stride=4),

            torch.nn.Conv1d(32, 64, kernel_size=4, padding=0),
            torch.nn.BatchNorm1d(64),
            torch.nn.ReLU(inplace=True),
            torch.nn.MaxPool1d(kernel_size=4, stride=4),
        )

        self.classifier = torch.nn.Sequential(
            torch.nn.Linear(1536, classes),
        )
        # Not applied in forward(); see class docstring.
        self.softmax = torch.nn.Softmax(dim=1)

    def forward(self, x):
        # BUG FIX: the original hard-coded x.view(100, 1, 1600), which only
        # worked for a batch size of exactly 100.  Validate and reshape using
        # the configured channel/sample counts so any batch size works.
        expected = self.input_channels * self.input_sample_points
        if x.numel() % expected != 0:
            raise Exception('输入数据维度错误,输入维度应为[Batch_size,{},{}],实际输入维度为{}'.format(self.input_channels, self.input_sample_points, x.size()))
        x = x.reshape(-1, self.input_channels, self.input_sample_points)
        x = self.features(x)
        # Flatten per-sample features for the linear classifier.
        x = x.view(x.size(0), -1)
        logits = self.classifier(x)
        return logits


if __name__ == '__main__':
   # Smoke test: run one forward pass with a batch of 100 signals of
   # 1600 samples each and print the output shape.
   model = CopyNet(input_channels=1, input_sample_points=1600, classes=4)
   input = torch.randn(size=(100, 1, 1600))
   output = model(input)
   print(output.shape)
   # expected: torch.Size([100, 4]) — one row per sample, one column per
   # class (the original comment "torch.Size([1, 5])" was stale/wrong)

Data processing module: custom Dataset plus a DataLoader factory

class MyDataset(Dataset):
    """CSV-backed dataset: each row is one sample.

    Every column except the last is treated as a feature; the last column is
    the class label. Features are min-max scaled column-wise to [0, 1].

    BUG FIX / generalization: the original hard-coded column index 1600 for
    the label and ``[:, :1600]`` for the features, so it silently broke for
    any other feature width; slicing on the last column works for any width
    and is identical for the 1600-feature CSVs used here.
    """

    def __init__(self, path):
        # path: CSV file with N feature columns followed by one label column.
        all_df = pd.read_csv(path)
        ndarry = all_df.values
        Label = ndarry[:, -1]
        Features = ndarry[:, :-1]
        minmax_scale = preprocessing.MinMaxScaler(feature_range=(0, 1))
        scaleFeatures = minmax_scale.fit_transform(Features)
        self.Features = scaleFeatures
        self.Label = Label

    def __getitem__(self, index):
        # Return the (features, label) pair at the given index.
        return self.Features[index], self.Label[index]

    def __len__(self):
        # Number of samples in the file.
        return len(self.Label)

def creat_data_loader(train_data, batch_size):
    """Wrap *train_data* in a DataLoader.

    Batches are taken in dataset order (no shuffling) and any final partial
    batch is dropped so every batch has exactly *batch_size* samples.
    """
    loader = DataLoader(
        dataset=train_data,
        batch_size=batch_size,
        drop_last=True,
    )
    return loader

Training script

train.py

import torch
from torch import nn, optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from torchsummary import summary
import timm
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from matplotlib import pyplot as plt
import numpy as np
from tqdm import tqdm
import Ranger
from train1 import MyDataset
from train1 import creat_data_loader
from copyNet import CopyNet

def get_dataloader(batch_size):
    """Build the train and test DataLoaders from the two CSV files.

    BUG FIX: the original constructed each loader twice — first through
    creat_data_loader with a hard-coded batch size of 100, then immediately
    overwrote both with fresh DataLoaders — so the first pair was dead code
    and the *batch_size* argument was partially ignored.  Each loader is now
    built exactly once with the requested batch size.

    Returns:
        (train_dataloader, test_dataloader), both shuffled per epoch.
    """
    train_Dataset = MyDataset(path="D:\python\python\outputdata.csv")
    test_Dataset = MyDataset(path="D:\python\python/testdata.csv")
    print('训练数据集长度: {}'.format(len(train_Dataset)))
    print('测试数据集长度: {}'.format(len(test_Dataset)))
    train_dataloader = DataLoader(train_Dataset, batch_size=batch_size, shuffle=True)
    test_dataloader = DataLoader(test_Dataset, batch_size=batch_size, shuffle=True)
    return train_dataloader, test_dataloader


def show_pic(dataloader):
    """Display the first four samples of the first batch in a 2x2 grid,
    titling each subplot with its class name."""
    sample_iter = enumerate(dataloader)
    _, (batch_data, batch_targets) = next(sample_iter)
    class_names = ('fold1', 'fold2', 'fold3', 'fold4')
    plt.figure()
    for idx in range(4):
        plt.subplot(2, 2, idx + 1)
        sample = batch_data[idx]
        print('pic shape:', sample.shape)
        # NOTE(review): plt.imshow expects 2-D data; presumably each sample
        # is reshaped upstream — confirm against the dataset's item shape.
        plt.imshow(sample, interpolation='none')
        plt.title(class_names[batch_targets[idx].item()])
        plt.xticks([])
        plt.yticks([])
    plt.show()


def get_net(device=None):
    """Create a CopyNet classifier and move it to *device*.

    BUG FIX: the original called .cuda() unconditionally and crashed on
    CPU-only machines.  *device* defaults to CUDA when available and CPU
    otherwise, which preserves the old behavior on GPU machines while
    remaining a backward-compatible signature (callers may still call
    get_net() with no arguments).
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = CopyNet(input_channels=1, input_sample_points=1600, classes=4).to(device)
    return net


def train(net, loss, train_dataloader, valid_dataloader, device, batch_size, num_epoch, lr, lr_min, optim='sgd',
          init=True, scheduler_type='Cosine'):
    """Train *net* and evaluate it on *valid_dataloader* after every epoch.

    Args:
        net: model to train (moved to *device* here).
        loss: loss criterion, e.g. nn.CrossEntropyLoss().
        train_dataloader / valid_dataloader: batched data iterators.
        device: torch.device to run on.
        batch_size: kept for backward compatibility; accuracy is now computed
            from the actual batch sizes (see BUG FIX below).
        num_epoch: number of epochs.
        lr / lr_min: initial and minimum learning rate (cosine annealing).
        optim: 'sgd' | 'adam' | 'adamW' | 'ranger'.
        init: apply Xavier init to Linear layers before training.
        scheduler_type: 'Cosine' enables CosineAnnealingLR; anything else
            disables LR scheduling.

    Returns:
        (train_losses, train_acces, eval_acces), one entry per epoch.
        Also saves the best-validation-accuracy weights to 'best_acc.pth'.
    """
    def init_xavier(m):
        # Xavier-init only Linear layers; Conv layers keep their default init.
        if type(m) == nn.Linear:
            nn.init.xavier_normal_(m.weight)

    if init:
        net.apply(init_xavier)

    print('training on:', device)
    net.to(device)

    trainable_params = (param for param in net.parameters() if param.requires_grad)
    if optim == 'sgd':
        optimizer = torch.optim.SGD(trainable_params, lr=lr, weight_decay=0)
    elif optim == 'adam':
        optimizer = torch.optim.Adam(trainable_params, lr=lr, weight_decay=0)
    elif optim == 'adamW':
        optimizer = torch.optim.AdamW(trainable_params, lr=lr, weight_decay=0)
    elif optim == 'ranger':
        optimizer = Ranger(trainable_params, lr=lr, weight_decay=0)
    else:
        # BUG FIX: the original silently fell through and later crashed with
        # an undefined `optimizer`; fail fast with a clear message instead.
        raise ValueError('unknown optimizer: {}'.format(optim))

    # BUG FIX: the original called scheduler.step() unconditionally, raising
    # NameError whenever scheduler_type != 'Cosine'.
    if scheduler_type == 'Cosine':
        scheduler = CosineAnnealingLR(optimizer, T_max=num_epoch, eta_min=lr_min)
    else:
        scheduler = None

    train_losses = []
    train_acces = []
    eval_acces = []
    best_acc = 0.0
    for epoch in range(num_epoch):

        print("——————第 {} 轮训练开始——————".format(epoch + 1))

        # ---- training phase ----
        net.train()
        train_acc = 0
        for batch in tqdm(train_dataloader, desc='训练'):
            imgs, targets = batch
            # BUG FIX: move tensors to *device* instead of hard-coded .cuda(),
            # so the function also runs on CPU-only machines.
            imgs = imgs.float().to(device)
            targets = targets.to(torch.int64).to(device)
            output = net(imgs)

            Loss = loss(output, targets)

            optimizer.zero_grad()
            Loss.backward()
            optimizer.step()

            _, pred = output.max(1)
            num_correct = (pred == targets).sum().item()
            # BUG FIX: divide by the actual batch size; the original divided
            # by the `batch_size` argument, which did not have to match the
            # DataLoader's batch size (64 vs 100 in the caller), skewing the
            # reported accuracy.
            train_acc += num_correct / targets.size(0)
        if scheduler is not None:
            scheduler.step()
        print("epoch: {}, Loss: {}, Acc: {}".format(epoch, Loss.item(), train_acc / len(train_dataloader)))
        train_acces.append(train_acc / len(train_dataloader))
        train_losses.append(Loss.item())

        # ---- validation phase ----
        net.eval()
        eval_loss = 0
        eval_acc = 0
        with torch.no_grad():
            for imgs, targets in valid_dataloader:
                imgs = imgs.float().to(device)
                targets = targets.to(torch.int64).to(device)
                output = net(imgs)
                Loss = loss(output, targets)
                _, pred = output.max(1)
                num_correct = (pred == targets).sum().item()
                # BUG FIX: accumulate a Python float (.item()) rather than a
                # tensor, so eval_losses prints as a number and no tensors
                # are retained across batches.
                eval_loss += Loss.item()
                eval_acc += num_correct / imgs.shape[0]

            eval_losses = eval_loss / len(valid_dataloader)
            eval_acc = eval_acc / len(valid_dataloader)
            # Keep a checkpoint of the best validation accuracy seen so far.
            if eval_acc > best_acc:
                best_acc = eval_acc
                torch.save(net.state_dict(), 'best_acc.pth')
            eval_acces.append(eval_acc)
            print('\n', "整体验证集上的Loss: {}".format(eval_losses))
            print("整体验证集上的正确率: {}".format(eval_acc))
    return train_losses, train_acces, eval_acces


def show_acces(train_losses, train_acces, valid_acces, num_epoch):
    """Plot training loss, training accuracy and validation accuracy
    against the epoch number on a single labeled, gridded figure."""
    curves = (
        (train_losses, 'train_losses'),
        (train_acces, 'train_acces'),
        (valid_acces, 'valid_acces'),
    )
    for values, label in curves:
        epochs = 1 + np.arange(len(values))
        plt.plot(epochs, values, linewidth=1.5, linestyle='dashed', label=label)
    plt.grid()
    plt.xlabel('epoch')
    plt.xticks(range(1, 1 + num_epoch, 1))
    plt.legend()
    plt.show()


if __name__ == '__main__':
    # Entry point: build loaders, train for 20 epochs with SGD + cosine
    # annealing, then plot the loss/accuracy curves.
    train_dataloader, test_dataloader = get_dataloader(batch_size=100)
    # show_pic(train_dataloader)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = get_net()
    loss = nn.CrossEntropyLoss()
    # BUG FIX: the batch_size passed to train() must match the DataLoader's
    # batch size; the original passed 64 while the loaders use 100, so the
    # per-batch accuracy (correct / batch_size) was inflated.
    train_losses, train_acces, eval_acces = train(net, loss, train_dataloader, test_dataloader, device,
                                                  batch_size=100, num_epoch=20, lr=5e-5, lr_min=1e-10,
                                                  optim='sgd', init=False)
    show_acces(train_losses, train_acces, eval_acces, num_epoch=20)

Training result: accuracy stayed low. Is there something wrong with my dataset?
(Likely not the dataset: the model applies Softmax inside forward() while training uses nn.CrossEntropyLoss, which applies log-softmax itself — the double softmax flattens gradients. There is also a batch-size mismatch: train() is called with batch_size=64 while the DataLoaders use 100, so the reported accuracy is miscomputed.)
The visualization method is taken from Zhihu

Guess you like

Origin blog.csdn.net/weixin_48983346/article/details/125714631
Recommended