PyTorch: Basic Operations

import torch
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt

PyTorch has two kinds of variable types: Tensor and Variable.

  • Tensor: just like an ndarray; a 1-D Tensor is called a vector, a 2-D Tensor a matrix, and 3-D and above are simply called tensors
  • Variable: a wrapper around a Tensor that stores not only the value but also the creator that produced it; any network that needs backpropagation does its computation with Variables (see the sketch below)
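A minimal sketch of the distinction (my example, assuming the 0.3-era API where Variable is still a separate wrapper):

import torch
from torch.autograd import Variable

v = torch.FloatTensor([1, 2, 3])         # 1-D Tensor: a vector
m = torch.FloatTensor([[1, 2], [3, 4]])  # 2-D Tensor: a matrix
t = torch.rand(2, 3, 4)                  # 3-D (and higher): a tensor
# Wrapping a Tensor in a Variable records the operations applied to it,
# so gradients can later flow back through the computation graph.
var = Variable(m, requires_grad=True)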
# torch.Tensor(shape) creates an uninitialized Tensor; it can still be printed, but the values are just whatever happened to be in that memory before
x = torch.Tensor(2,3,4)
# A Tensor created this way is mostly used to receive the results of other computations
x
(0 ,.,.) = 
  2.8404e+29  6.7111e+22  1.3556e-19  1.3563e-19
  1.3563e-19  1.3563e-19  1.3563e-19  4.4591e+30
  3.9173e-02  3.0386e+29  1.2134e+19  1.1625e+27

(1 ,.,.) = 
  4.5435e+30  7.1429e+31  1.8467e+20  7.1220e+28
  1.3556e-19  1.3563e-19  1.3563e-19  1.3563e-19
  1.3563e-19  1.3563e-19  1.3563e-19  1.3563e-19
[torch.FloatTensor of size 2x3x4]
x.size()
torch.Size([2, 3, 4])
a = torch.rand(2,3,4)
b = torch.rand(2,3,4)
# A Tensor created with torch.Tensor() is used here to receive the result; torch.add(..) also returns the result itself
_=torch.add(a,b,out=x)
x
(0 ,.,.) = 
  1.2776  1.5515  1.6725  1.5813
  1.8088  1.0150  0.4805  0.7575
  1.4111  1.1892  1.4516  1.4634

(1 ,.,.) = 
  1.5148  1.5504  1.2196  1.8959
  0.7730  1.4018  0.9021  1.6071
  1.4863  0.6581  0.8338  0.8901
[torch.FloatTensor of size 2x3x4]
# Every operation with a trailing _ modifies the calling object in place
a.add_(b)
# e.g. with a=1 and b=2, a.add_(b) makes a equal 3; operations without the trailing _ leave the caller unchanged and only return the result
(0 ,.,.) = 
  1.2776  1.5515  1.6725  1.5813
  1.8088  1.0150  0.4805  0.7575
  1.4111  1.1892  1.4516  1.4634

(1 ,.,.) = 
  1.5148  1.5504  1.2196  1.8959
  0.7730  1.4018  0.9021  1.6071
  1.4863  0.6581  0.8338  0.8901
[torch.FloatTensor of size 2x3x4]
a
(0 ,.,.) = 
  1.2776  1.5515  1.6725  1.5813
  1.8088  1.0150  0.4805  0.7575
  1.4111  1.1892  1.4516  1.4634

(1 ,.,.) = 
  1.5148  1.5504  1.2196  1.8959
  0.7730  1.4018  0.9021  1.6071
  1.4863  0.6581  0.8338  0.8901
[torch.FloatTensor of size 2x3x4]
torch.cuda.is_available()
False
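If a GPU is available, Tensors can be moved to it with .cuda() and back with .cpu(); a minimal sketch (only meaningful on a machine with CUDA):

if torch.cuda.is_available():
    a_gpu = a.cuda()      # copy the tensor to the GPU
    b_gpu = b.cuda()
    c = a_gpu + b_gpu     # the addition runs on the GPU
    c = c.cpu()           # move the result back to the CPU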

Automatic Differentiation

  • PyTorch's automatic differentiation tools live in torch.autograd
from torch.autograd import Variable
x = torch.rand(5)
x = Variable(x, requires_grad = True)
y = x * 2
#If y is a scalar, just call y.backward() and read the gradient of x from x.grad
#If y is not a scalar, you must pass a gradient argument to backward() to specify the upstream gradient for x
grads = torch.FloatTensor([1,2,3,4,5])
y.backward(grads)
x.grad 
Variable containing:
  2
  4
  6
  8
 10
[torch.FloatTensor of size 5]
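For comparison, a sketch of the scalar case (my addition), where backward() needs no argument:

x2 = Variable(torch.rand(5), requires_grad=True)
y2 = (x2 * 2).sum()   # y2 is a scalar
y2.backward()         # no gradient argument needed for a scalar output
print(x2.grad)        # every element's gradient is 2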

NumPy arrays and torch Tensors convert back and forth freely

import torch
import numpy as np
np_data = np.arange(6).reshape((2,3))
torch_data = torch.from_numpy(np_data)
tensor2array = torch_data.numpy()
print(
    '\nnumpy array:', np_data,  
    '\ntorch tensor:', torch_data,
    '\ntensor to array:', tensor2array,
)
numpy array: [[0 1 2]
 [3 4 5]] 
torch tensor: 
 0  1  2
 3  4  5
[torch.IntTensor of size 2x3]

tensor to array: [[0 1 2]
 [3 4 5]]
# abs: absolute value
data = [-1, -2, 1, 2]
tensor = torch.FloatTensor(data)  # convert to a 32-bit float tensor
print(
    '\nabs',
    '\nnumpy: ', np.abs(data),          # [1 2 1 2]
    '\ntorch: ', torch.abs(tensor)      # [1 2 1 2]
)
abs 
numpy:  [1 2 1 2] 
torch:  
 1
 2
 1
 2
[torch.FloatTensor of size 4]
# sin: trigonometric sine
print(
    '\nsin',
    '\nnumpy: ', np.sin(data),      # [-0.84147098 -0.90929743  0.84147098  0.90929743]
    '\ntorch: ', torch.sin(tensor)  # [-0.8415 -0.9093  0.8415  0.9093]
)
sin 
numpy:  [-0.84147098 -0.90929743  0.84147098  0.90929743] 
torch:  
-0.8415
-0.9093
 0.8415
 0.9093
[torch.FloatTensor of size 4]
# mean: average
print(
    '\nmean',
    '\nnumpy: ', np.mean(data),         # 0.0
    '\ntorch: ', torch.mean(tensor)     # 0.0
)
mean 
numpy:  0.0 
torch:  0.0

Matrix Multiplication

# matrix multiplication
data = [[1,2], [3,4]]
tensor = torch.FloatTensor(data)  # convert to a 32-bit float tensor
# correct method
print(
    '\nmatrix multiplication (matmul)',
    '\nnumpy: ', np.matmul(data, data),     # [[7, 10], [15, 22]]
    '\ntorch: ', torch.mm(tensor, tensor)   # [[7, 10], [15, 22]]
)

# !!!!  The following is the WRONG way !!!!
data = np.array(data)
print(
    '\nmatrix multiplication (dot)',
    '\nnumpy: ', data.dot(data),        # [[7, 10], [15, 22]] works in numpy
    '\ntorch: ', tensor.dot(tensor)     # old torch flattened this to [1,2,3,4].dot([1,2,3,4]) = 30.0
)
#In newer versions (>=0.3.0) tensor.dot() only accepts 1-D tensors, so the call above now raises an error.
matrix multiplication (matmul) 
numpy:  [[ 7 10]
 [15 22]] 
torch:  
  7  10
 15  22
[torch.FloatTensor of size 2x2]




---------------------------------------------------------------------------

RuntimeError                              Traceback (most recent call last)

<ipython-input-44-e307c2bb5181> in <module>()
     14     '\nmatrix multiplication (dot)',
     15     '\nnumpy: ', data.dot(data),        # [[7, 10], [15, 22]] works in numpy
---> 16     '\ntorch: ', tensor.dot(tensor)     # old torch flattened this to [1,2,3,4].dot([1,2,3,4]) = 30.0
     17 )
     18 #In newer versions (>=0.3.0) tensor.dot() only accepts 1-D tensors, so the call above now raises an error.


RuntimeError: Expected argument self to have 1 dimension(s), but has 2 at c:\miniconda2\conda-bld\pytorch-cpu_1519449358620\work\torch\csrc\generic\TensorMethods.cpp:25700
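To recover the old flattened behaviour on a newer version, flatten explicitly first; a small sketch:

flat = tensor.view(-1)   # [1, 2, 3, 4]
print(flat.dot(flat))    # 1 + 4 + 9 + 16 = 30.0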

Variables

  • Variable: the basket that holds the eggs; the number of eggs keeps changing
  • Tensor: the eggs
import torch
# the Variable module in torch
from torch.autograd import Variable

#first make the eggs (a Tensor)
tensor = torch.FloatTensor([[1,2],[3,4]])
#put the eggs into the basket; requires_grad controls whether gradients are computed
variable = Variable(tensor, requires_grad=True)

print(tensor,variable)
 1  2
 3  4
[torch.FloatTensor of size 2x2]
 Variable containing:
 1  2
 3  4
[torch.FloatTensor of size 2x2]

Computing gradients with a Variable

t_out = torch.mean(tensor*tensor) # mean of x^2
v_out = torch.mean(variable*variable)
print(t_out,v_out)
7.5 Variable containing:
 7.5000
[torch.FloatTensor of size 1]
v_out.backward()    # backpropagate the error from v_out
# Don't worry if the next two lines are unclear; just know that a Variable is part of the computation graph and can propagate errors.
# v_out = 1/4 * sum(variable*variable) is the computation-graph step that produced v_out
# so the gradient w.r.t. variable is d(v_out)/d(variable) = 1/4 * 2 * variable = variable/2
 # gradient of the original Variable
print(variable.grad)  
Variable containing:
 1  2
 3  4
[torch.FloatTensor of size 2x2]
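A quick sanity check (my addition): the derivation says the gradient should be variable/2 = [[0.5, 1.0], [1.5, 2.0]]. The values printed above are exactly twice that, most likely because backward() ran more than once in the notebook, since PyTorch accumulates gradients until they are cleared:

variable.grad.data.zero_()                # clear the accumulated gradient
v_out = torch.mean(variable * variable)
v_out.backward()
print(variable.grad)                      # now [[0.5, 1.0], [1.5, 2.0]], i.e. variable/2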

Getting the data out of a Variable

  • print(variable) only shows the data in Variable form, which often cannot be used directly
  • (e.g. for plotting with plt), so we convert it into tensor form first.
#Variable form
print(variable) 
#tensor form
print(variable.data)
#numpy form
print(variable.data.numpy())
Variable containing:
 1  2
 3  4
[torch.FloatTensor of size 2x2]


 1  2
 3  4
[torch.FloatTensor of size 2x2]

[[1. 2.]
 [3. 4.]]

Activation Functions in Torch

import torch
import torch.nn.functional as F # the activation functions live here
from torch.autograd import Variable

# make some fake data to plot
x = torch.linspace(-5, 5, 200)  # x data (tensor), shape=(200,)
x = Variable(x)
x_np = x.data.numpy() # convert to numpy for plotting

# several common activation functions
y_relu = F.relu(x).data.numpy()
y_sigmoid =F.sigmoid(x).data.numpy()
y_tanh = F.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()
# y_softmax = F.softmax(x)  softmax is special: it outputs class probabilities, so it is not plotted as a curve here (see the sketch after the figure)
plt.figure(1, figsize=(8, 6))
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim((-1, 5))
plt.legend(loc='best')

plt.subplot(222)
plt.plot(x_np, y_sigmoid, c='red', label='sigmoid')
plt.ylim((-0.2, 1.2))
plt.legend(loc='best')

plt.subplot(223)
plt.plot(x_np, y_tanh, c='red', label='tanh')
plt.ylim((-1.2, 1.2))
plt.legend(loc='best')

plt.subplot(224)
plt.plot(x_np, y_softplus, c='red', label='softplus')
plt.ylim((-0.2, 6))
plt.legend(loc='best')
<matplotlib.legend.Legend at 0x164816efe48>

[Figure: relu, sigmoid, tanh and softplus activation curves]
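Softmax does not plot well as a single curve, but a small sketch (my addition) shows the idea: it turns a vector of scores into probabilities that sum to 1.

scores = Variable(torch.FloatTensor([1.0, 2.0, 3.0]))
probs = F.softmax(scores, dim=0)   # dim=0: softmax over the only dimension
print(probs)                       # roughly [0.09, 0.24, 0.67]
print(probs.sum())                 # 1.0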

Curve Fitting (Regression)

  • We create some fake data to simulate a real problem, e.g. a quadratic function y = a * x^2 + b, and add a little noise to y to make it look more realistic.

  • Build the dataset

x = torch.unsqueeze(torch.linspace(-1,1,100),dim=1) #x data (tensor), shape=(100, 1)
y = x.pow(2) + 0.2*torch.rand(x.size())  # noisy y data (tensor), shape=(100, 1)

#wrap these data tensors in Variables
x, y = Variable(x),Variable(y)

#plot
plt.scatter(x.data.numpy(), y.data.numpy())
plt.show()

[Figure: scatter plot of the noisy quadratic data]

  • Build the neural network
class Net(torch.nn.Module): # inherit from torch's Module
    def __init__(self,n_feature,n_hidden,n_output):
        super(Net,self).__init__() # call the parent's __init__
        # define the form of each layer
        self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer
        self.predict = torch.nn.Linear(n_hidden, n_output) # output layer

    def forward(self, x): # the forward pass required by Module
        # forward-propagate the input and let the network produce the output
        x = F.relu(self.hidden(x)) # activation of the hidden layer's linear output
        x = self.predict(x) # output value
        return x
net = Net(n_feature=1,n_hidden=10,n_output=1)
print(net)
Net(
  (hidden): Linear(in_features=1, out_features=10, bias=True)
  (predict): Linear(in_features=10, out_features=1, bias=True)
)

Training the network

# the optimizer is the training tool

optimizer = torch.optim.SGD(net.parameters(),lr=0.5) # pass in all of net's parameters and the learning rate
loss_func = torch.nn.MSELoss() # loss between prediction and target (mean squared error)

for i in range(100):
    prediction = net(x) # feed training data x into net and get the prediction

    loss = loss_func(prediction, y) # compute the loss between the two

    optimizer.zero_grad() # clear the gradients left over from the previous step
    loss.backward() # backpropagate the error and compute the parameter updates
    optimizer.step() # apply the updates to net's parameters

Visualizing the training process

plt.ion()
plt.show()
for t in range(100):
    prediction = net(x) # feed training data x into net and get the prediction

    loss = loss_func(prediction, y) # compute the loss between the two

    optimizer.zero_grad() # clear the gradients left over from the previous step
    loss.backward() # backpropagate the error and compute the parameter updates
    optimizer.step() # apply the updates to net's parameters

    if t % 5 == 0:
        # plt and show learning process
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(),prediction.data.numpy(),'r-',lw=5)
        plt.text(0.5, 0, 'Loss=%.4f' % loss.data[0], fontdict={'size': 20, 'color':  'red'})
        plt.pause(0.1)

[Figure: regression fit (red line) over the data, with the loss shown during training]

Classification

  • Build the dataset
# fake data
n_data = torch.ones(100, 2)         # base shape of the data
n_data.numpy().shape
(100, 2)
x0 = torch.normal(2*n_data, 1)      # class 0 x data (tensor), shape=(100, 2)
y0 = torch.zeros(100)               # class 0 y data (tensor), shape=(100,)
x1 = torch.normal(-2*n_data, 1)     # class 1 x data (tensor), shape=(100, 2)
y1 = torch.ones(100)                # class 1 y data (tensor), shape=(100,)
# note: x and y must have exactly the form below (torch.cat concatenates the data)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # FloatTensor = 32-bit floating
y = torch.cat((y0, y1), ).type(torch.LongTensor)    # LongTensor = 64-bit integer
# torch can only train on Variables, so convert them to Variables
x, y = Variable(x), Variable(y)
# plot
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
plt.show()

[Figure: scatter plot of the two classes]

Building the neural network

  • To build the network we can use torch's class system directly: first define all the layer attributes in __init__(),
  • then wire the layers together in forward(x). This is essentially the same as the regression network above.
import torch
import torch.nn.functional as F

class Net(torch.nn.Module):
    def __init__(self,n_feature, n_hidden,n_output):
        super(Net,self).__init__() # call the parent's __init__
        self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer, linear output
        self.out = torch.nn.Linear(n_hidden,n_output)

    def forward(self,x):
        x = F.relu(self.hidden(x))
        x = self.out(x)
        return x


net = Net(n_feature=2, n_hidden=10, n_output=2) # one output per class

print(net)
Net(
  (hidden): Linear(in_features=2, out_features=10, bias=True)
  (out): Linear(in_features=10, out_features=2, bias=True)
)

Training the network

optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
# When computing the loss, note that the targets are NOT one-hot; they are a 1D Tensor of shape (batch,)
# while the predictions are a 2D tensor of shape (batch, n_classes)
loss_func = torch.nn.CrossEntropyLoss()

for t in range(100):
    out =net(x)

    loss = loss_func(out, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step() # apply the updates to net's parameters
    # continuing from above
    if t % 2 == 0:
        plt.cla()
        # the prediction is the class with the highest probability after softmax
        prediction = torch.max(F.softmax(out), 1)[1]
        pred_y = prediction.data.numpy().squeeze()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
        accuracy = sum(pred_y == target_y)/200  # fraction of predictions that match the targets
        plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color':  'red'})
        plt.pause(0.1)
plt.ioff()  # 停止画图
plt.show()
C:\Users\dell\Anaconda3\lib\site-packages\ipykernel_launcher.py:18: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
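The UserWarning above comes from the implicit dim in F.softmax(out); a hedged fix is to pass the class dimension explicitly:

prediction = torch.max(F.softmax(out, dim=1), 1)[1]   # dim=1: softmax over the class dimension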

Quick Build

#Above we subclassed a torch network structure and customized it, but there is an even quicker way:
#one statement covers everything above!
net2 = torch.nn.Sequential(
    torch.nn.Linear(1, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1)
)
print(net2)
Sequential(
  (0): Linear(in_features=1, out_features=10, bias=True)
  (1): ReLU()
  (2): Linear(in_features=10, out_features=1, bias=True)
)

Saving

torch.manual_seed(1) #reproducible

#fake data
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1) # x data(tensor)
y = x.pow(2) + 0.2 * torch.rand(x.size()) #noisy y data(tensor)
x, y = Variable(x, requires_grad=False),Variable(y,requires_grad=False)

def save():
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    optimizer = torch.optim.SGD(net1.parameters(),lr=0.5)
    loss_func = torch.nn.MSELoss()

    #train
    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    torch.save(net1, 'net.pkl')  # save the entire network
    torch.save(net1.state_dict(), 'net_params.pkl')   # save only the parameters (faster, uses less memory)
save()

Restoring the network

def restore_net():
    #restore entire net1 to net2
    net2 = torch.load('net.pkl')
    prediction = net2(x)

Restoring only the parameters: this extracts all of the saved parameters and loads them into a network you build yourself.

def restore_params():
    #build a new net3
    net3 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    #you must build a model identical to the one that was trained
    #copy the saved parameters into net3
    net3.load_state_dict(torch.load('net_params.pkl'))
    prediction = net3(x)
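A short usage sketch (my addition), assuming net.pkl and net_params.pkl were written by save() above:

restore_net()     # rebuild the whole network from net.pkl
restore_params()  # rebuild the same architecture by hand, then load only the weights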

Batch Training

import torch
import torch.utils.data as Data
torch.manual_seed(1) #reproducible
BATCH_SIZE = 5 # number of samples per batch

x = torch.linspace(1, 10 ,10)
y = torch.linspace(1, 10, 10)

#first convert into a Dataset that torch recognizes
torch_dataset = Data.TensorDataset(data_tensor=x,target_tensor=y)

#put the dataset into a DataLoader
loader = Data.DataLoader(
    dataset = torch_dataset, # torch TensorDataset format
    batch_size = BATCH_SIZE, #mini batch size
    shuffle = True, # whether to shuffle the data
    num_workers = 2 , # number of worker processes for reading the data
)

for epoch in range(3): # train over the whole dataset 3 times
    for step, (batch_x, batch_y) in enumerate(loader): # each step the loader yields one mini-batch
        # pretend the actual training happens here...
        # print out some data
        print('Epoch: ', epoch, '| Step: ', step, '| batch x: ',
              batch_x.numpy(), '| batch y: ', batch_y.numpy())
Epoch:  0 | Step:  0 | batch x:  [ 4.  3.  6.  9. 10.] | batch y:  [ 4.  3.  6.  9. 10.]
Epoch:  0 | Step:  1 | batch x:  [8. 7. 2. 1. 5.] | batch y:  [8. 7. 2. 1. 5.]
Epoch:  1 | Step:  0 | batch x:  [1. 5. 4. 8. 9.] | batch y:  [1. 5. 4. 8. 9.]
Epoch:  1 | Step:  1 | batch x:  [ 6.  3. 10.  2.  7.] | batch y:  [ 6.  3. 10.  2.  7.]
Epoch:  2 | Step:  0 | batch x:  [ 5. 10.  8.  9.  7.] | batch y:  [ 5. 10.  8.  9.  7.]
Epoch:  2 | Step:  1 | batch x:  [1. 2. 3. 6. 4.] | batch y:  [1. 2. 3. 6. 4.]
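Note: the data_tensor/target_tensor keyword arguments above belong to the old 0.3.x API. In newer PyTorch versions TensorDataset takes the tensors positionally; a hedged sketch if you are on a newer release:

torch_dataset = Data.TensorDataset(x, y)   # positional tensors, PyTorch >= 0.4
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)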

Speeding Up Neural Network Training (Speed Up Training)

import torch
import torch.utils.data as Data
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

torch.manual_seed(1)    # reproducible

LR = 0.01
BATCH_SIZE = 32
EPOCH = 12

# fake dataset
x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)
y = x.pow(2) + 0.1*torch.normal(torch.zeros(*x.size()))

# plot dataset
plt.scatter(x.numpy(), y.numpy())
plt.show()

# use the data loader from the previous section
torch_dataset = Data.TensorDataset(data_tensor=x, target_tensor=y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2,)
# each optimizer trains its own neural network
# the default network form
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)   # hidden layer
        self.predict = torch.nn.Linear(20, 1)   # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))      # activation function for hidden layer
        x = self.predict(x)             # linear output
        return x

# create one net for each optimizer
net_SGD         = Net()
net_Momentum    = Net()
net_RMSprop     = Net()
net_Adam        = Net()
nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]
# different optimizers
opt_SGD         = torch.optim.SGD(net_SGD.parameters(), lr=LR)
opt_Momentum    = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
opt_RMSprop     = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
opt_Adam        = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]

loss_func = torch.nn.MSELoss()
losses_his = [[], [], [], []]   # record each network's loss during training

for epoch in range(EPOCH):
    print('Epoch: ', epoch)
    for step, (batch_x, batch_y) in enumerate(loader):
        b_x = Variable(batch_x)  # be sure to wrap the batch in a Variable
        b_y = Variable(batch_y)

        # for each optimizer, update the network that belongs to it
        for net, opt, l_his in zip(nets, optimizers, losses_his):
            output = net(b_x)              # get output for every net
            loss = loss_func(output, b_y)  # compute loss for every net
            opt.zero_grad()                # clear gradients for next train
            loss.backward()                # backpropagation, compute gradients
            opt.step()                     # apply gradients
            l_his.append(loss.data[0])     # loss recorder
Epoch:  0
Epoch:  1
Epoch:  2
Epoch:  3
Epoch:  4
Epoch:  5
Epoch:  6
Epoch:  7
Epoch:  8
Epoch:  9
Epoch:  10
Epoch:  11
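losses_his is recorded above but never plotted; a short sketch (my addition) to compare the four optimizers:

labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
for i, l_his in enumerate(losses_his):
    plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.ylim((0, 0.2))
plt.show()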

Reference: 莫烦Python (Morvan Python) PyTorch tutorials


Reposted from blog.csdn.net/nockinonheavensdoor/article/details/80113666