数据集下载地址:https://www.lanzouw.com/b01i4vc4b
密码:6y8b
注意:文件大小494M,有点大,百度云太慢不考虑,蓝奏云只能上传小于100M的,所以,将训练集拆分压缩了,使用时请将Training(0~30)和Training(31~61)合并到一个文件夹中
这个数据集有62个类别,可以做分类任务,数据量不是很多,所以我使用旋转、翻转、错切(shear)对数据集进行增强,得到18300张图片。
类别:62类,训练集和测试集都有标签
训练集:4575张图片,下面的代码通过旋转、翻转、错切对数据进行增强,得到18300张图片
测试集:2520张图片
完整代码在最下面:
下面数据处理和训练过程的代码:
定义数据增强的transform,如果你不想增强数据,只用data_transforms即可
# 定义图片增强要用的transforms
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch import nn
import torch
import torch.nn.functional as F
# Prefer GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Per-channel mean / std collected from the training set. Defined once and
# shared by every pipeline below — the original repeated these magic tuples
# four times, so a future update could easily leave them out of sync.
NORM_MEAN = (0.3337, 0.3064, 0.3171)
NORM_STD = (0.2672, 0.2564, 0.2629)

# Baseline pipeline: resize every image to 32x32 and normalize.
# This is also the only pipeline applied to the test set.
data_transforms = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(NORM_MEAN, NORM_STD)
])
# Augmentation: random rotation within +/-15 degrees.
data_rotate = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.RandomRotation(15),
    transforms.ToTensor(),
    transforms.Normalize(NORM_MEAN, NORM_STD)
])
# Augmentation: flip both horizontally and vertically (p=1, i.e. always).
data_hvflip = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.RandomHorizontalFlip(1),
    transforms.RandomVerticalFlip(1),
    transforms.ToTensor(),
    transforms.Normalize(NORM_MEAN, NORM_STD)
])
# Augmentation: random affine transform with up to 15 degrees rotation
# and up to 2 degrees of shear.
data_shear = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.RandomAffine(degrees=15, shear=2),
    transforms.ToTensor(),
    transforms.Normalize(NORM_MEAN, NORM_STD)
])
加载数据集。制作train_dataset时,我使用了上面的三个数据增强的transforms,你也可以只用第一个,或者只使用其中几个
# Dataset root directories (adjust to your local layout).
train_dir = './data/TSC/Training/'
test_dir = './data/TSC/Testing/'

# Augmented training set: every raw image plus one rotated, one flipped and
# one sheared copy, i.e. 4x the raw data (4575 -> 18300 images).
train_dataset = torch.utils.data.ConcatDataset([
    datasets.ImageFolder(train_dir, transform=data_transforms),
    datasets.ImageFolder(train_dir, transform=data_rotate),
    datasets.ImageFolder(train_dir, transform=data_hvflip),
    datasets.ImageFolder(train_dir, transform=data_shear)])
# Test set only gets the deterministic resize + normalize pipeline.
test_dataset = datasets.ImageFolder(test_dir, transform=data_transforms)

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=1)
# Evaluation metrics are order-independent, so shuffling the test set
# (as the original did) only adds overhead.
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=False, num_workers=1)
定义模型:
class CNN_TSC(nn.Module):
    """LeNet-5 style CNN for 62-class classification of 32x32 RGB images.

    Input:  (batch, 3, 32, 32) float tensor.
    Output: (batch, 62) raw class logits (no softmax; pair with CrossEntropyLoss).
    """

    def __init__(self):
        super(CNN_TSC, self).__init__()
        # 3x32x32 -> conv5 -> 6x28x28 -> pool -> 6x14x14
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        # 6x14x14 -> conv5 -> 16x10x10 -> pool -> 16x5x5
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 62)  # 62 traffic-sign classes

    def forward(self, x):
        """Return class logits of shape (batch, 62)."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # flatten(x, 1) keeps the batch dimension intact. The original
        # x.view(-1, 16 * 5 * 5) would silently change the batch size (instead
        # of raising) if the input were ever not 32x32.
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        output = self.fc3(x)
        return output
# Instantiate the network and move it to the selected device.
net= CNN_TSC().to(device)
# Define the loss function and the optimizer.
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
训练和预测:
# Training: one full pass over the (augmented) training set.
def train():
    """Run one training epoch: forward, loss, backward, parameter update."""
    net.train()
    for images, targets in train_loader:
        images = images.to(device)
        targets = targets.to(device)
        net.zero_grad()
        batch_loss = loss(net(images), targets)
        batch_loss.backward()
        optimizer.step()
# Evaluation: report mean loss and accuracy over the test set.
def test(epoch):
    """Evaluate `net` on test_loader and print epoch, mean loss and accuracy.

    Runs under torch.no_grad() and accumulates the loss via .item() — the
    original built and kept the autograd graph of every batch alive for the
    whole loop, wasting a lot of memory for no benefit.
    """
    net.eval()
    batch_loss, correct, total = 0.0, 0, 0
    with torch.no_grad():
        for data, label in test_loader:
            data, label = data.to(device), label.to(device)
            output = net(data)
            batch_loss += loss(output, label).item()
            predict_label = torch.argmax(output, dim=1)
            correct += (predict_label == label).sum().item()
            total += len(label)
    print('epoch:%d loss: %.4f accuracy:%.2f%%' %(epoch, batch_loss/len(test_loader), 100*correct/total))
# Main loop: train for 10 epochs, evaluating on the test set after each one.
print('training on %s'%(device))
for epoch in range(10):
    train()
    test(epoch)
实验结果:
完整的代码:
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch import nn
import torch
import torch.nn.functional as F
# Prefer GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Per-channel mean / std collected from the training set. Defined once and
# shared by every pipeline below — the original repeated these magic tuples
# four times, so a future update could easily leave them out of sync.
NORM_MEAN = (0.3337, 0.3064, 0.3171)
NORM_STD = (0.2672, 0.2564, 0.2629)

# Baseline pipeline: resize every image to 32x32 and normalize.
# This is also the only pipeline applied to the test set.
data_transforms = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(NORM_MEAN, NORM_STD)
])
# Augmentation: random rotation within +/-15 degrees.
data_rotate = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.RandomRotation(15),
    transforms.ToTensor(),
    transforms.Normalize(NORM_MEAN, NORM_STD)
])
# Augmentation: flip both horizontally and vertically (p=1, i.e. always).
data_hvflip = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.RandomHorizontalFlip(1),
    transforms.RandomVerticalFlip(1),
    transforms.ToTensor(),
    transforms.Normalize(NORM_MEAN, NORM_STD)
])
# Augmentation: random affine transform with up to 15 degrees rotation
# and up to 2 degrees of shear.
data_shear = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.RandomAffine(degrees=15, shear=2),
    transforms.ToTensor(),
    transforms.Normalize(NORM_MEAN, NORM_STD)
])
# Dataset root directories — change these to match your local layout.
train_dir = './data/TSC/Training/'
test_dir = './data/TSC/Testing/'

# Augmented training set: every raw image plus one rotated, one flipped and
# one sheared copy, i.e. 4x the raw data (4575 -> 18300 images).
train_dataset = torch.utils.data.ConcatDataset([
    datasets.ImageFolder(train_dir, transform=data_transforms),
    datasets.ImageFolder(train_dir, transform=data_rotate),
    datasets.ImageFolder(train_dir, transform=data_hvflip),
    datasets.ImageFolder(train_dir, transform=data_shear)])
# Test set only gets the deterministic resize + normalize pipeline.
test_dataset = datasets.ImageFolder(test_dir, transform=data_transforms)

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=1)
# Evaluation metrics are order-independent, so shuffling the test set
# (as the original did) only adds overhead.
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=False, num_workers=1)
class CNN_TSC(nn.Module):
    """LeNet-5 style CNN for 62-class classification of 32x32 RGB images.

    Input:  (batch, 3, 32, 32) float tensor.
    Output: (batch, 62) raw class logits (no softmax; pair with CrossEntropyLoss).
    """

    def __init__(self):
        super(CNN_TSC, self).__init__()
        # 3x32x32 -> conv5 -> 6x28x28 -> pool -> 6x14x14
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        # 6x14x14 -> conv5 -> 16x10x10 -> pool -> 16x5x5
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 62)  # 62 traffic-sign classes

    def forward(self, x):
        """Return class logits of shape (batch, 62)."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # flatten(x, 1) keeps the batch dimension intact. The original
        # x.view(-1, 16 * 5 * 5) would silently change the batch size (instead
        # of raising) if the input were ever not 32x32.
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        output = self.fc3(x)
        return output
# Instantiate the network and move it to the selected device.
net= CNN_TSC().to(device)
# Define the loss function and the optimizer.
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
# Training: one full pass over the (augmented) training set.
def train():
    """Run one training epoch: forward, loss, backward, parameter update."""
    net.train()
    for images, targets in train_loader:
        images = images.to(device)
        targets = targets.to(device)
        net.zero_grad()
        batch_loss = loss(net(images), targets)
        batch_loss.backward()
        optimizer.step()
# Evaluation: report mean loss and accuracy over the test set.
def test(epoch):
    """Evaluate `net` on test_loader and print epoch, mean loss and accuracy.

    Runs under torch.no_grad() and accumulates the loss via .item() — the
    original built and kept the autograd graph of every batch alive for the
    whole loop, wasting a lot of memory for no benefit.
    """
    net.eval()
    batch_loss, correct, total = 0.0, 0, 0
    with torch.no_grad():
        for data, label in test_loader:
            data, label = data.to(device), label.to(device)
            output = net(data)
            batch_loss += loss(output, label).item()
            predict_label = torch.argmax(output, dim=1)
            correct += (predict_label == label).sum().item()
            total += len(label)
    print('epoch:%d loss: %.4f accuracy:%.2f%%' %(epoch, batch_loss/len(test_loader), 100*correct/total))
# Main loop: train for 10 epochs, evaluating on the test set after each one.
print('training on %s'%(device))
for epoch in range(10):
    train()
    test(epoch)