Deep Learning (73): PyTorch Study Notes


Let me gripe first: deep learning moves really fast, and the frameworks keep iterating along with it, which is hard on those of us who write deep learning code for a living. Since I started learning deep learning three years ago, I have switched frameworks one after another: Keras, Theano, Caffe, Darknet, TensorFlow, and now I am starting on PyTorch.

1. Variables and gradients: the torch.autograd module

When a Variable is created, requires_grad defaults to False, so no gradient is computed for it; setting it to True makes the variable differentiable.

# requires_grad defaults to False.
# If every variable involved is non-differentiable when backward() is called,
# PyTorch raises an error because there is nothing to compute gradients for.
import torch
from torch import autograd

input = torch.FloatTensor([1, 2, 3])
input_v = autograd.Variable(input, requires_grad=True)
loss = torch.mean(input_v)

print(loss.requires_grad)
loss.backward()
print(input_v)
print(input_v.grad)
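For reference, in PyTorch 0.4 and later Variable was merged into Tensor, so the same experiment no longer needs autograd.Variable. A minimal sketch of the newer style:

import torch

# Tensors created with requires_grad=True are tracked by autograd directly.
x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
loss = x.mean()

loss.backward()  # d(mean)/dx_i = 1/3 for each element
print(x.grad)    # tensor([0.3333, 0.3333, 0.3333])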

2. Data layers and transforms

import os
from PIL import Image
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision
import matplotlib.pyplot as plt
import numpy as np

# Data augmentation / preprocessing pipelines
data_transform_train = torchvision.transforms.Compose([
    torchvision.transforms.RandomRotation(30),
    torchvision.transforms.RandomCrop((32,32)),
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
data_transform_eval=torchvision.transforms.Compose([
    torchvision.transforms.CenterCrop((32,32)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
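As a quick sanity check of the pipeline (a sketch using a blank 35x35 PIL image, matching the resize done in __getitem__ below), the training transform yields a 3x32x32 float tensor whose values are normalized into [-1, 1]:

img = Image.new('RGB', (35, 35))  # dummy image standing in for a real sample
out = data_transform_train(img)
print(out.shape)               # torch.Size([3, 32, 32])
print(out.min(), out.max())    # after Normalize, values lie in [-1, 1]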
# For a custom dataset, override these three methods: __init__, __getitem__, __len__
class mydata(Dataset):
    def __init__(self,label_file,image_root,is_train=True):
        self.imagepaths=[]
        self.labels=[]
        self.is_train=is_train
        if is_train:
            self.transforms=data_transform_train
        else:
            self.transforms=data_transform_eval

        with open(label_file, 'r') as f:
            for line in f.readlines():  # each line: <image path> <integer label>
                self.imagepaths.append(os.path.join(image_root, line.split()[0]))
                self.labels.append(int(line.split()[1]))
    def __getitem__(self, item):
        x = Image.open(self.imagepaths[item]).resize((35, 35))
        y = self.labels[item]
        if self.is_train:
            # return two independently augmented views of the same image;
            # they serve as the student and teacher inputs in section 4
            return [self.transforms(x), self.transforms(x)], y
        else:
            return self.transforms(x), y
    def __len__(self):
        return len(self.imagepaths)

def make_weights_for_balanced_classes(labels, nclasses):
    # per-sample weights inversely proportional to class frequency,
    # intended for torch.utils.data.sampler.WeightedRandomSampler
    count = {}
    for item in labels:
        if item in count:  # dict.has_key() is Python 2 only
            count[item] += 1
        else:
            count[item] = 1
    weight_per_class = {}
    N = len(labels)
    for key, value in count.items():
        weight_per_class[key] = N / float(value)
    weight = [0] * len(labels)
    for idx, val in enumerate(labels):
        weight[idx] = weight_per_class[val]
    return weight
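For example, with an imbalanced label list the rare class gets a proportionally larger weight (N divided by its count), so WeightedRandomSampler draws each class roughly equally often:

labels = [0, 0, 0, 1]  # class 0 is three times as frequent as class 1
print(make_weights_for_balanced_classes(labels, 2))
# [1.333..., 1.333..., 1.333..., 4.0]  ->  4/3 for class 0 samples, 4/1 for class 1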


train_data = mydata('data/train.txt', './', is_train=True)
weights = make_weights_for_balanced_classes(train_data.labels, 3)
weights = torch.DoubleTensor(weights)
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
train_dataloader_student = DataLoader(train_data, batch_size=6, sampler=sampler)
for x, y in train_dataloader_student:
    for xi in x:
        print(y)
        npimg = torchvision.utils.make_grid(xi).numpy()  # visualize the batch as an image grid
        plt.imshow(np.transpose(npimg, (1, 2, 0)))
        plt.show()

3. Network architecture

The model below is a slimmed-down MobileNet: after one standard convolution, it stacks depthwise-separable blocks (a depthwise 3x3 convolution followed by a pointwise 1x1 convolution, each with batch normalization and ReLU), ends with average pooling, and classifies with a single fully connected layer.

from moving_avarage_layer import conv2d_moving  # local module from the original project (unused below)
import torch
from torch import autograd, nn
from torch.utils.data import DataLoader, Dataset
from data_layer import mydata,make_weights_for_balanced_classes
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as function
import os
import time


class MobileNet(nn.Module):
    def __init__(self):
        super(MobileNet, self).__init__()

        def conv_bn(inp, oup, stride):
            # standard 3x3 convolution + BN + ReLU
            return nn.Sequential(
                nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True)
            )

        def conv_dw(inp, oup, stride):
            # depthwise-separable block: depthwise 3x3 conv, then pointwise 1x1 conv
            return nn.Sequential(
                nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU(inplace=True),

                nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True),
            )

        self.model = nn.Sequential(
            conv_bn(  3,  32, 2),
            conv_dw( 32,  64, 1),
            conv_dw( 64, 96, 2),
            conv_dw(96, 96, 1),
            conv_dw(96, 128, 2),
            conv_dw(128, 128, 1),
            conv_dw(128, 256, 2),
            conv_dw(256, 256, 1),
            conv_dw(256, 512, 1),
            nn.AvgPool2d(2),
        )
        self.fc = nn.Linear(512, 4)

    def forward(self, x):
        x = self.model(x)
        # for 32x32 inputs, x has shape (N, 512, 1, 1) at this point
        x = x.view(-1, 512)
        x = self.fc(x)
        return x
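A quick shape sanity check (a sketch assuming the 32x32 crops produced by the transforms in section 2):

net = MobileNet()
dummy = autograd.Variable(torch.randn(2, 3, 32, 32))  # two fake 32x32 RGB images
out = net(dummy)
print(out.size())  # torch.Size([2, 4]) -- one 4-class logit vector per image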

4. Optimization and training

The training loop below follows a mean-teacher style semi-supervised setup: the student network is trained by gradient descent on a classification loss plus a consistency loss that pulls its softmax output toward the teacher's, while the teacher's weights are an exponential moving average (EMA) of the student's.

def update_ema_variables(model, ema_model, alpha):
    # teacher = alpha * teacher + (1 - alpha) * student, parameter by parameter
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(1 - alpha, param.data)

def softmax_mse_loss(input_logits, target_logits):
    # consistency loss: MSE between the student's and teacher's softmax outputs
    assert input_logits.size() == target_logits.size()
    input_softmax = function.softmax(input_logits, dim=1)
    target_softmax = function.softmax(target_logits, dim=1)
    num_classes = input_logits.size()[1]
    return function.mse_loss(input_softmax, target_softmax, size_average=False) / num_classes
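To see the EMA update concretely, here is a toy sketch (not part of the training script) with two single-weight layers:

student = nn.Linear(1, 1, bias=False)
teacher = nn.Linear(1, 1, bias=False)
student.weight.data.fill_(1.0)
teacher.weight.data.fill_(0.0)

update_ema_variables(student, teacher, alpha=0.9)
print(teacher.weight.data)  # 0.9 * 0.0 + 0.1 * 1.0 = 0.1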

torch.backends.cudnn.enabled = False
torch.manual_seed(7)

net_student = MobileNet().cuda()
net_teacher = MobileNet().cuda()
for param in net_teacher.parameters():
    param.detach_()  # the teacher is never updated by backprop, only by the EMA update
if os.path.isfile('teacher.pt'):
    net_student.load_state_dict(torch.load('teacher.pt'))

train_data = mydata('data/race/train.txt', './', is_train=True)
min_batch_size = 32
weights = make_weights_for_balanced_classes(train_data.labels, 5)
weights = torch.DoubleTensor(weights)
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
train_dataloader = DataLoader(train_data, batch_size=min_batch_size, sampler=sampler, num_workers=8)

valid_data = mydata('data/race/val.txt', './', is_train=False)
valid_dataloader = DataLoader(valid_data, batch_size=min_batch_size, shuffle=True, num_workers=8)

# sum (not average) the loss over the batch -- it is divided by the batch size manually below;
# targets equal to -1 are ignored, which leaves room for unlabeled samples
classify_loss_function = torch.nn.CrossEntropyLoss(size_average=False, ignore_index=-1).cuda()
optimizer = torch.optim.SGD(net_student.parameters(), lr=0.001, momentum=0.9)

globals_step = 0
for epoch in range(10000):
    globals_classify_loss = 0
    globals_consistency_loss = 0
    net_student.train()
    start = time.time()
    for index, (x, y) in enumerate(train_dataloader):
        optimizer.zero_grad()

        # the first augmented view feeds the student
        x_student = autograd.Variable(x[0]).cuda()
        y = autograd.Variable(y).cuda()
        predict_student = net_student(x_student)

        classify_loss = classify_loss_function(predict_student, y) / min_batch_size
        sum_loss = classify_loss

        # the second augmented view feeds the teacher; volatile=True skips gradient tracking
        x_teacher = autograd.Variable(x[1], volatile=True).cuda()
        predict_teacher = net_teacher(x_teacher)
        ema_logit = autograd.Variable(predict_teacher.detach().data, requires_grad=False)
        consistency_loss = softmax_mse_loss(predict_student, ema_logit) / min_batch_size
        consistency_weight = 1
        sum_loss += consistency_weight * consistency_loss
        globals_consistency_loss += consistency_loss.data[0]

        sum_loss.backward()
        optimizer.step()
        # ramp alpha from 0 toward 0.99 so the teacher follows the student closely
        # early in training and averages over a longer window later on
        # (1.0 forces true division, which the original integer expression broke in Python 2)
        alpha = min(1 - 1.0 / (globals_step + 1), 0.99)
        update_ema_variables(net_student, net_teacher, alpha)

        globals_classify_loss += classify_loss.data[0]
        globals_step += 1


    # evaluate and checkpoint every 5 epochs
    if epoch % 5 != 0:
        continue

    net_student.eval()
    correct = 0
    total = 0
    for images, labels in valid_dataloader:
        valid_input = autograd.Variable(images, volatile=True).cuda()
        outputs = net_student(valid_input)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted.cpu() == labels).sum()
    print("epoch:%d" % epoch, "time:%d" % (time.time() - start),
          "accuracy:%d" % (100 * correct / total),
          "consistency loss:%f" % globals_consistency_loss,
          "classify loss:%f" % globals_classify_loss)
    torch.save(net_student.state_dict(), 'teacher.pt')
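Finally, a minimal inference sketch, assuming the teacher.pt checkpoint saved above, the data_transform_eval pipeline from section 2, and a hypothetical image path:

net = MobileNet().cuda()
net.load_state_dict(torch.load('teacher.pt'))
net.eval()

img = Image.open('data/race/some_image.jpg').resize((35, 35))  # hypothetical path
x = autograd.Variable(data_transform_eval(img).unsqueeze(0), volatile=True).cuda()
logits = net(x)
_, predicted_class = torch.max(logits.data, 1)
print(predicted_class)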
