Source code: "PyTorch model classification network"

Explanation: the code in the original article "PyTorch model classification network" contains errors. I tweaked it a bit and it now works fine.

Title: PyTorch hands-on practice: PyTorch model classification network
1) The explanation and code mainly follow the Zhihu article "pytorch model classification network". The original code is well organized and easy to read, but it does not run as published; I debugged and modified it until it worked. Beginners can use the source code in this article as a reference.
2) This project performs vehicle classification with a ResNet-50 network and is suitable for basic learning.
3) Download data: the public "10 Types of Vehicle Type Recognition Dataset", used here to train a model for vehicle recognition and vehicle classification. It provides 2000 high-resolution images annotated for vehicle scene classification across 10 vehicle types. Labels: bus, taxi, truck, family sedan, minibus, jeep, SUV, heavy truck, racing car, fire engine.
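
ImageFolder, used in the code below, expects one sub-directory per class. A minimal sketch of the expected layout (the folder names are illustrative, not taken from the dataset itself; ImageFolder derives each class's label index from the sorted folder names):

car_class10_data/
    train/
        bus/           <- images of this class
        taxi/
        ...            (10 folders in total, one per class)
    val/
        bus/
        taxi/
        ...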
 

1. The source code is as follows:

"""
********** Class labels **********
0,巴士,bus
1,出租车,taxi
2,货车,truck
3,家用轿车,family sedan
4,面包车,minibus
5,吉普车,jeep
6,运动型多功能车,SUV
7,重型货车,heavy truck
8,赛车,racing car
9,消防车,fire engine
"""
import torch
from torchvision import transforms
from torchvision import datasets
import matplotlib.pyplot as plt
import torch.nn.functional as F
import numpy as np
from torchvision import models
import torch.nn as nn
from torch import optim
import os

# **************** Settings *******************
NUM_CLASSES = 10
BATCH_SIZE = 32   # unused below; the data loaders use TRAIN_BATCH_SIZE / TEST_BATCH_SIZE
NUM_EPOCHS = 25   # unused below; the training loop uses `epochs`
# Download URL: https://download.pytorch.org/models/resnet50-19c8e357.pth
PRETRAINED_MODEL = './resnet50-19c8e357.pth'
MODEL_SAVE_PATH = 'trained_models/vehicle-10_record.pth'  # where the fine-tuned weights are saved
# Dataset location: the images are stored in 10 folders, one per class
TRAIN_DATASET_DIR = r'D:/PycharmProjects/studyPytorch/studyTest/data/car_class10_data/train'
VALID_DATASET_DIR = r'D:/PycharmProjects/studyPytorch/studyTest/data/car_class10_data/val'
TRAIN_BATCH_SIZE = 128
TEST_BATCH_SIZE = 128
DROPOUT_RATE = 0.3  # unused below; the classifier head hard-codes nn.Dropout(0.4)
show_interval_num = 10  # print a training log line every N batches
epochs = 20

# **************** Data augmentation **************************
# For the training set (train_data)
train_transforms = transforms.Compose(
    [transforms.RandomResizedCrop(size=256, scale=(0.8, 1.0)),  # random crop, resized to 256x256
     transforms.RandomRotation(degrees=15),  # random rotation
     transforms.RandomHorizontalFlip(),  # random horizontal flip
     transforms.CenterCrop((224, 224)),  # center crop to 224x224
     transforms.ToTensor(),  # to tensor: divides pixel values by 255 into [0, 1] and converts H*W*C (RGB) to C*H*W
     transforms.Normalize([0.485, 0.456, 0.406],
                          [0.229, 0.224, 0.225])  # standardize with the ImageNet mean/std
     ])
# For the test/validation set: no random cropping at test time, just resize to 224x224
test_valid_transforms = transforms.Compose(
    [transforms.Resize((224,224)),
     transforms.ToTensor(),
     transforms.Normalize([0.485, 0.456, 0.406],
                          [0.229, 0.224, 0.225])])
# ************************* Load the data with PyTorch's built-in loaders **************************
# On DataLoader, see https://blog.csdn.net/weixin_40123108/article/details/85099449
# ImageFolder assumes the files are organized into folders, one folder per class. For details
# see the ImageFolder class, in particular __getitem__, which returns the (image, label) pair
# for a given index.
# torch.utils.data.DataLoader is a data loader that combines a dataset with a sampler and can
# use multiple worker processes to load the data. During training it splits the data into
# batches and yields one batch at a time until the whole dataset has been consumed.
# Further reading: https://zhuanlan.zhihu.com/p/28200166    https://www.jb51.net/article/184042.htm
train_datasets = datasets.ImageFolder(TRAIN_DATASET_DIR, transform=train_transforms)
train_dataloader = torch.utils.data.DataLoader(train_datasets, batch_size=TRAIN_BATCH_SIZE, shuffle=True)
train_data_size = len(train_datasets)
valid_datasets = datasets.ImageFolder(VALID_DATASET_DIR, transform=test_valid_transforms)
valid_dataloader = torch.utils.data.DataLoader(valid_datasets, batch_size=TEST_BATCH_SIZE, shuffle=True)
valid_data_size = len(valid_datasets)
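
# ImageFolder derives each class's label index from the alphabetically sorted sub-folder
# names, which may not match the 0-9 table at the top of this file unless the folders are
# named accordingly -- a quick sanity check (assumes the layout sketched above):
print("classes:", train_datasets.classes)             # class folder names, in label order
print("class_to_idx:", train_datasets.class_to_idx)   # folder name -> label index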


# **************************** Run test_data() to inspect the loaded data ****************************

def test_data():
    print("train_dataloader len", len(train_dataloader))
    for images, labels in train_dataloader:
        print(labels)
        print("label len", len(labels))
        img = images[0]
        img = img.numpy()
        img = np.transpose(img, (1, 2, 0))  # C*H*W -> H*W*C
        # undo the Normalize transform so the image displays with natural colors
        img = img * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])
        img = np.clip(img, 0, 1)
        plt.imshow(img)
        plt.show()
        break


# ******************* Fine-tune the pretrained ResNet-50 **************************
# Freeze the backbone and replace the final fully connected (fc) layer
def resnet50():
    model = models.resnet50(pretrained=True)
    # Alternatively, load the weights from the local file downloaded above:
    # model = models.resnet50()
    # model.load_state_dict(torch.load(PRETRAINED_MODEL))
    for param in model.parameters():
        param.requires_grad = False  # freeze all pretrained backbone parameters
    fc_inputs = model.fc.in_features
    model.fc = nn.Sequential(  # new, trainable classifier head
        nn.Linear(fc_inputs, 256),
        nn.ReLU(),
        nn.Dropout(0.4),
        nn.Linear(256, NUM_CLASSES),
        nn.LogSoftmax(dim=1)  # log-probabilities, to pair with NLLLoss below
    )
    return model
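
# A minimal sanity check of the modified head (illustrative; uncomment to run manually):
# out = resnet50()(torch.randn(1, 3, 224, 224))
# assert out.shape == (1, NUM_CLASSES)  # one log-probability per class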

# ******************** Loss function and optimizer *********************
# (set up once at module level; main() uses them directly)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}  # optional DataLoader extras
model = resnet50().to(device)
loss_func = nn.NLLLoss()  # equivalent to the F.nll_loss used in train()/test() below
# The optimizer must receive the parameters of the model instance trained above (the original
# code passed a second, freshly created model to Adam, so the trained model was never updated);
# the filter keeps only the un-frozen classifier head.
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()))


# ********************** Training and validation loops ***************************
def train(model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % show_interval_num == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()))


# **************************** main *****************************
def main():
    # The transforms, datasets, data loaders, model, and optimizer are all created at
    # module level above, so main() only runs the train/validate loop.
    for epoch in range(1, epochs + 1):
        train(model, device, train_dataloader, optimizer, epoch)
        test_acc = test(model, device, valid_dataloader)
        # report intermediate result
        print('test accuracy %g' % test_acc)
    # report final result
    print('Final result is %g' % test_acc)
    # save the fine-tuned weights
    os.makedirs(os.path.dirname(MODEL_SAVE_PATH), exist_ok=True)
    torch.save(model.state_dict(), MODEL_SAVE_PATH)


def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # sum up batch loss
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            # get the index of the max log-probability
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), accuracy))
    return accuracy


if __name__ == "__main__":
    main()
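
For completeness, here is a minimal inference sketch (my addition, not from the original article): it rebuilds the network, loads the weights saved by main() at MODEL_SAVE_PATH, and classifies a single image with the same test-time transforms. The image path is illustrative, and the class name is looked up through train_datasets.classes, i.e. the training folder layout.

from PIL import Image

def predict(image_path):
    net = resnet50().to(device)
    net.load_state_dict(torch.load(MODEL_SAVE_PATH, map_location=device))
    net.eval()
    img = Image.open(image_path).convert('RGB')
    x = test_valid_transforms(img).unsqueeze(0).to(device)  # add a batch dimension
    with torch.no_grad():
        log_probs = net(x)  # LogSoftmax output, shape (1, NUM_CLASSES)
    idx = log_probs.argmax(dim=1).item()
    print('predicted:', idx, train_datasets.classes[idx])

# predict('some_vehicle.jpg')  # illustrative path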

2. Result output:


Origin blog.csdn.net/qimo601/article/details/127126695