模型训练,预测,数据集调用

模型做预测时,各种值的打印。
MobileNetV2模型预测单张图:

import os
import json
import torch
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt
from model_v2 import MobileNetV2


def main():
    """Predict the class of a single image with a trained MobileNetV2.

    Loads one image, applies the standard ImageNet-style eval transform,
    runs the model, and prints/plots the per-class probabilities.
    Requires `2.jpg`, `class_indices.json`, and `MobileNetV2.pth` in the
    working directory.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Standard ImageNet eval preprocessing: resize, center-crop, normalize
    # with the usual ImageNet channel means/stds.
    data_transform = transforms.Compose(
        [transforms.Resize(256),
         transforms.CenterCrop(224),
         transforms.ToTensor(),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

    # load image
    img_path = "2.jpg"
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(img_path)
    # Force 3-channel RGB so grayscale/RGBA inputs don't break the
    # 3-channel Normalize above.
    img = Image.open(img_path).convert("RGB")
    plt.imshow(img)
    # [C, H, W] after the transform
    img = data_transform(img)
    # expand batch dimension -> [N, C, H, W]; the model expects a batch axis
    img = torch.unsqueeze(img, dim=0)
    print("img:", img)  # [batchsize, c, h, w]

    # read class_indict (maps class index string -> class name)
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(json_path)

    with open(json_path, "r") as f:
        class_indict = json.load(f)

    # create model
    model = MobileNetV2(num_classes=5).to(device)
    # load model weights
    model_weight_path = "./MobileNetV2.pth"
    model.load_state_dict(torch.load(model_weight_path, map_location=device))
    model.eval()
    with torch.no_grad():
        # predict class
        output = model(img.to(device)).cpu()
        print("output:", output)
        print("output.shape:", output.shape)
        output = torch.squeeze(output)  # drop the batch dimension
        predict = torch.softmax(output, dim=0)  # raw scores -> probabilities
        predict_cla = torch.argmax(predict).numpy()

    print_res = "class: {}   prob: {:.3}".format(class_indict[str(predict_cla)],
                                                 predict[predict_cla].numpy())
    plt.title(print_res)
    print(predict)
    for i in range(len(predict)):
        print("class: {:10}   prob: {:.3}".format(class_indict[str(i)],
                                                  predict[i].numpy()))
    plt.show()


if __name__ == '__main__':
    main()

输出:


img: tensor([[[[ 1.9407,  1.9407,  1.9749,  ..., -0.0458,  0.9303,  1.4612], #增加了batch维度,目前是只有一张图,
          [ 1.8893,  1.9407,  1.9578,  ..., -0.5082, -0.0287,  0.8789],						# 但放到模型里需要batch维度
          [ 1.8379,  1.8722,  1.8037,  ..., -0.1828, -0.3883, -0.0458],
          ...,
          [-1.7069, -1.6727, -1.6898,  ..., -0.5082,  0.2967, -0.0287],
          [-1.7412, -1.7069, -1.7412,  ..., -0.4911,  0.4679, -0.4739],
          [-1.7412, -1.7925, -1.7925,  ..., -0.3712,  0.1254, -0.1143]],

         [[ 2.4111,  2.3585,  2.2710,  ..., -0.7227,  0.2577,  1.2906],
          [ 2.2710,  2.2360,  2.2010,  ..., -0.7402, -0.8277,  0.0476],
          [ 2.0259,  2.0084,  1.9734,  ..., -0.2850, -0.6702, -0.7052],
          ...,
          [-1.6155, -1.7031, -1.6155,  ..., -0.3375,  0.3452,  0.1352],
          [-1.6856, -1.6681, -1.7031,  ..., -0.3200,  0.3627,  0.2052],
          [-1.7381, -1.7206, -1.7206,  ..., -0.0574,  0.2402,  0.1527]],

         [[-0.4450, -0.0790, -0.0615,  ..., -1.1073, -0.7238,  0.6879],
          [-0.5321, -0.1312, -0.2010,  ..., -0.7587, -1.2467, -0.7936],
          [-0.3055, -0.0790, -0.2010,  ..., -0.0092, -0.4973, -1.0376],
          ...,
          [-1.3513, -1.4559, -1.4384,  ..., -0.0790,  0.5136,  0.4439],
          [-1.4733, -1.4733, -1.4907,  ..., -0.0790,  0.5311,  0.5136],
          [-1.4210, -1.4036, -1.4907,  ...,  0.3393,  0.4265,  0.3219]]]])
          
output: tensor([[ 0.4225, -1.3952, -1.5308, -0.4893, -2.1681]])#模型的原始输出(logits,还未经过softmax)
output.shape: torch.Size([1, 5])
tensor([0.5615, 0.0912, 0.0796, 0.2256, 0.0421]) #先squeeze压缩了batch维度,再经过softmax变成 预测概率

class: daisy        prob: 0.561
class: dandelion    prob: 0.0912
class: roses        prob: 0.0796
class: sunflowers   prob: 0.226
class: tulips       prob: 0.0421

多张图预测见下篇博客数据集导入预处理和多张图片预测

使用CIFAR10做样本:

    # CIFAR10 preprocessing: to tensor, then normalize each channel to
    # roughly [-1, 1].
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # 50000 training images.
    # On first use set download=True so the dataset is fetched automatically.
    # Both the images and their labels come from this dataset object.
    train_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                                             download=False, transform=transform)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=36,  # images per batch
                                               shuffle=True, num_workers=0)

    # 10000 validation images.
    # On first use set download=True so the dataset is fetched automatically.
    val_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=False, transform=transform)
    # Validation data gets no random flip/crop augmentation — augmentation is
    # only used on the training set to reduce overfitting.
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=10000,
                                             shuffle=False, num_workers=0)
    val_data_iter = iter(val_loader)  # turn the loader into an iterator
    # BUGFIX: use the builtin next(); Python 3 iterators (and modern PyTorch
    # DataLoader iterators) have no .next() method — that call raises
    # AttributeError.
    val_image, val_label = next(val_data_iter)  # one batch of images and labels

加载自己的样本做多batch的训练时

    # Per-split preprocessing pipelines. The train pipeline uses random
    # crop/flip augmentation; the val pipeline only resizes and center-crops
    # to the model's 224x224 input size.
    data_transform = {


        "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),
                                     transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
        "val": transforms.Compose([transforms.Resize(256),
                                   transforms.CenterCrop(224),
                                   transforms.ToTensor(),
                                   transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])}

    data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # get data root path
    image_path = os.path.join(data_root, "data_set", "flower_data")  # flower data set path
    assert os.path.exists(image_path), "{} path does not exist.".format(image_path)
    train_dataset = datasets.ImageFolder(root=os.path.join(image_path, "train"),  # load the train split
                                         transform=data_transform["train"])  # preprocessing to apply
    train_num = len(train_dataset)

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx  # mapping: class name -> index
    cla_dict = dict((val, key) for key, val in flower_list.items())  # invert it so an index looks up its class name
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)  # encode as JSON
    with open('class_indices.json', 'w') as json_file:  # persist for prediction-time lookup
        json_file.write(json_str)

    # nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    nw = 0
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size, shuffle=True,
                                               num_workers=nw)

    validate_dataset = datasets.ImageFolder(root=os.path.join(image_path, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=batch_size, shuffle=False,
                                                  num_workers=nw)

    print("using {} images for training, {} images for validation.".format(train_num,
                                                                           val_num))

训练和验证样本的调用
(好几个batch的图片都放在train_loader和val_loader里,使用时用
for val_data in val_loader或for step, data in enumerate(train_loader)来循环得到每个batch里的图片)

# Train/validate loop. Assumes net, optimizer, loss_function, epochs,
# train_loader, validate_loader, val_num, best_acc, save_path, and device
# are defined by the enclosing script (not visible in this snippet).
train_steps = len(train_loader)
    for epoch in range(epochs):
        # train
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader, file=sys.stdout)  # progress bar; optional — can be replaced by
        for step, data in enumerate(train_bar):          # enumerate(train_loader)
            images, labels = data
            optimizer.zero_grad()
            logits = net(images.to(device))
            loss = loss_function(logits, labels.to(device))
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                     epochs,
                                                                     loss)

        # validate
        net.eval()
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():
            val_bar = tqdm(validate_loader, file=sys.stdout)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                # loss = loss_function(outputs, test_labels)
                predict_y = torch.max(outputs, dim=1)[1]  # index of the highest logit = predicted class
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

                val_bar.desc = "valid epoch[{}/{}]".format(epoch + 1,
                                                           epochs)
        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        if val_accurate > best_acc:  # checkpoint only when validation accuracy improves
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')

猜你喜欢

转载自blog.csdn.net/weixin_44040169/article/details/127988652
今日推荐