PyTorch: Implementing Kaggle's Dogs vs. Cats with a CNN

Today let's walk through solving Kaggle's Dogs vs. Cats with a CNN in PyTorch.

Without further ado, let's get started.
First, the images come from Kaggle: searching the site for "Dogs vs. Cats" turns up plenty of related datasets, so pick one and download it.

The dataset I use here is:

  • Train: 4,000 cat images + 4,000 dog images
  • Test: 1,000 cat images + 1,000 dog images

PyTorch version: torch 1.3.1+cpu with torchvision 0.4.2+cpu

Steps:
1. Define a custom Dataset
2. Define the PyTorch CNN architecture
3. Use the custom Dataset to load the data
4. Create a CNN instance
5. Define the loss function and the optimizer for the network
6. Train
7. Test and check the accuracy

Let's begin.

First, import the libraries we will need:

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image

Define the data transform:

data_transform = transforms.Compose([
    transforms.Resize(84),
    transforms.CenterCrop(84),
    transforms.ToTensor(),
    transforms.Normalize(mean = [0.485, 0.456, 0.406],std = [0.229, 0.224, 0.225])
])
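
Resize(84) followed by CenterCrop(84) gives every image a fixed 3x84x84 shape, and Normalize uses the usual ImageNet channel statistics. Keeping the spatial size in mind matters later: two 2x2 max-pools shrink 84x84 to 42x42 and then 21x21, which is exactly where the 32 * 21 * 21 in the fully connected layer comes from. A quick sanity check of the transform (the image path below is just a placeholder for any picture from your dataset):

# Quick check that the transform produces the expected tensor shape.
# './img/some_cat.jpg' is a placeholder path; use any image from your dataset.
sample = Image.open('./img/some_cat.jpg').convert('RGB')
print(data_transform(sample).shape)   # torch.Size([3, 84, 84])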

Define our own Dataset:

class MyDataSet(Dataset):
    def __init__(self, txtPath, data_transform):
        # Each line of the txt file is "image_path label" (0 = cat, 1 = dog).
        self.imgPathArr = []
        self.labelArr = []
        with open(txtPath, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                imgPath, label = line.split(" ")
                self.imgPathArr.append(imgPath)
                self.labelArr.append(label)
        self.transforms = data_transform

    def __getitem__(self, index):
        label = np.array(int(self.labelArr[index]))
        img_path = self.imgPathArr[index]
        pil_img = Image.open(img_path).convert("RGB")  # make sure we always get 3 channels
        if self.transforms:
            data = self.transforms(pil_img)
        else:
            data = torch.from_numpy(np.asarray(pil_img))
        return data, label

    def __len__(self):
        return len(self.imgPathArr)

Here I pass the Dataset a txt file along with the data_transform defined above. Each line of the txt file holds an image path plus its label, where 0 means cat and 1 means dog; the Dataset then builds the corresponding (image, label) pairs from that file. You can write a simple Python script that walks the image folders, assigns each file its label, and writes those entries into the txt file; a minimal sketch follows.
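
Here is one way such a script might look. It assumes the common Dogs vs. Cats naming convention where each file name starts with "cat" or "dog" (e.g. cat.0.jpg); adjust the folder path and the naming rule to match your own data.

# Sketch: build label.txt from a folder of cat/dog images.
# Assumes file names start with "cat" or "dog" (e.g. cat.0.jpg, dog.0.jpg).
import os

img_dir = './img'  # folder containing the training images
with open(os.path.join(img_dir, 'label.txt'), 'w', encoding='utf-8') as f:
    for name in sorted(os.listdir(img_dir)):
        if not name.lower().endswith(('.jpg', '.jpeg', '.png')):
            continue
        label = 0 if name.lower().startswith('cat') else 1
        f.write('{} {}\n'.format(os.path.join(img_dir, name), label))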

Of course, this is just how I like to define my Dataset. Feel free to define your own dataset format however you prefer, as long as it follows PyTorch's conventions; you don't have to do it my way.
Build the PyTorch CNN:

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 16, 5, 1, 2),       # 3x84x84 -> 16x84x84
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),     # 16x84x84 -> 16x42x42
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),      # 16x42x42 -> 32x42x42
            nn.ReLU(),
            nn.MaxPool2d(2),                 # 32x42x42 -> 32x21x21
        )
        self.out = nn.Linear(32 * 21 * 21, 2)   # 2 classes: cat / dog

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)            # flatten to (batch, 32*21*21)
        output = self.out(x)
        return output, x                     # also return the flattened features
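
A quick sanity check with a dummy batch confirms the shapes line up (a throwaway snippet, not part of the training script):

# Feed one fake 3x84x84 image through the network and check the output shapes.
net = Net()
dummy = torch.randn(1, 3, 84, 84)
logits, features = net(dummy)
print(logits.shape)    # torch.Size([1, 2])
print(features.shape)  # torch.Size([1, 14112])  (32 * 21 * 21)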

Set up the loss function and the optimizer, then create the datasets defined above and feed them into the CNN for training.
For reference, my file layout is: the training images and their label.txt live under the img folder, and the test images and their label.txt live under the test folder.

if __name__ == '__main__':

    train_dataset = MyDataSet('./img/label.txt', data_transform)
    train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=4)

    test_dataset = MyDataSet('./test/label.txt', data_transform)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=4)

    net = Net()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)

    # Train
    try:
        for epoch in range(3):
            running_loss = 0.0
            for i, data in enumerate(train_loader, 0):
                inputs, labels = data
                labels = labels.long()

                outputs = net(inputs)[0]
                loss = criterion(outputs, labels)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                running_loss += loss.item()

                if i % 100 == 99:   # print the average loss every 100 mini-batches
                    print('[%d %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))
                    running_loss = 0.0
    finally:
        # Save the parameters even if training is interrupted.
        print('finished training!')
        torch.save(net.state_dict(), 'net_params.pkl')

    # Test
    correct = 0
    total = 0

    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = net(images)[0]

            predicted = torch.max(outputs, 1)[1].numpy()
            total += labels.size(0)
            correct += (predicted == labels.numpy()).sum()

    print('Accuracy of the network on the 2000 test images: %d %%' % (100 * correct / total))

Test results:
I won't retrain the CNN here; instead I load the previously trained parameters from net_params.pkl. The final accuracy on the Dogs vs. Cats test set comes out to about 73%. You can tune the CNN yourself to push this number higher.
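
For reference, loading the saved parameters back into a fresh network looks like this (a small sketch; the file name matches the torch.save call above):

# Recreate the network and load the previously saved parameters.
net = Net()
net.load_state_dict(torch.load('net_params.pkl'))
net.eval()  # switch to evaluation mode before testing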
Finally, here is the complete code:

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image

class MyDataSet(Dataset):
    def __init__(self, txtPath, data_transform):
        # Each line of the txt file is "image_path label" (0 = cat, 1 = dog).
        self.imgPathArr = []
        self.labelArr = []
        with open(txtPath, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                imgPath, label = line.split(" ")
                self.imgPathArr.append(imgPath)
                self.labelArr.append(label)
        self.transforms = data_transform

    def __getitem__(self, index):
        label = np.array(int(self.labelArr[index]))
        img_path = self.imgPathArr[index]
        pil_img = Image.open(img_path).convert("RGB")  # make sure we always get 3 channels
        if self.transforms:
            data = self.transforms(pil_img)
        else:
            data = torch.from_numpy(np.asarray(pil_img))
        return data, label

    def __len__(self):
        return len(self.imgPathArr)

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=3,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=2,
            ),                               # 3x84x84 -> 16x84x84
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),     # 16x84x84 -> 16x42x42
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),      # 16x42x42 -> 32x42x42
            nn.ReLU(),
            nn.MaxPool2d(2),                 # 32x42x42 -> 32x21x21
        )
        self.out = nn.Linear(32 * 21 * 21, 2)   # 2 classes: cat / dog

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)            # flatten to (batch, 32*21*21)
        output = self.out(x)
        return output, x                     # also return the flattened features


data_transform = transforms.Compose([
    transforms.Resize(84),
    transforms.CenterCrop(84),
    transforms.ToTensor(),
    transforms.Normalize(mean = [0.485, 0.456, 0.406],std = [0.229, 0.224, 0.225])
])

if __name__ == '__main__':

    train_dataset = MyDataSet('./img/label.txt', data_transform)
    train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=4)

    test_dataset = MyDataSet('./test/label.txt', data_transform)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=4)

    net = Net()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)

    # Train
    try:
        for epoch in range(3):
            running_loss = 0.0
            for i, data in enumerate(train_loader, 0):
                inputs, labels = data
                labels = labels.long()

                outputs = net(inputs)[0]
                loss = criterion(outputs, labels)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                running_loss += loss.item()

                if i % 100 == 99:   # print the average loss every 100 mini-batches
                    print('[%d %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))
                    running_loss = 0.0
    finally:
        # Save the parameters even if training is interrupted.
        print('finished training!')
        torch.save(net.state_dict(), 'net_params.pkl')

    # Test
    correct = 0
    total = 0

    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = net(images)[0]

            predicted = torch.max(outputs, 1)[1].numpy()
            total += labels.size(0)
            correct += (predicted == labels.numpy()).sum()

    print('Accuracy of the network on the 2000 test images: %d %%' % (100 * correct / total))

