PyTorch Learning (4)

#coding=utf-8
import torch

a = torch.Tensor(2,3)
print a

# Indexing (demonstrated in the sketch after this list)
a[0]: row 0
a[:,0]: column 0
a[0][2]: the element at row 0, column 2; equivalent to a[0,2]
a[0,-1]: row 0, last element
a[:2]: the first two rows
a[:2,0:2]: the first two rows, columns 0 and 1
a[0:1,:2]: row 0, the first two columns; note its shape differs from a[0,:2]
a[torch.LongTensor([0,1])]: rows 0 and 1
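
A minimal sketch that exercises these indexing forms on a concrete 2x3 tensor (the values are arbitrary, chosen only for illustration):

#coding=utf-8
import torch

a = torch.Tensor([[1,2,3],[4,5,6]])
print a[0]                              # row 0
print a[:,0]                            # column 0
print a[0][2], a[0,2]                   # both give the element at row 0, column 2
print a[0,-1]                           # last element of row 0
print a[:2]                             # first two rows (the whole tensor here)
print a[:2,0:2]                         # first two rows, columns 0 and 1
print a[0:1,:2].size(), a[0,:2].size()  # 1x2 vs 2: different shapes
print a[torch.LongTensor([0,1])]        # rows 0 and 1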

2. Details

#coding=utf-8
import torch

a = torch.Tensor(2,3)
print a

b = torch.Tensor([[1,2,3],[4,5,6]])
#print b

c = b.tolist()
#print c

print b.numel()

d = torch.Tensor(b.size())
#print d


b = a.view(-1,6)
#print b

e = torch.unsqueeze(a,1)  # insert a dimension of size 1 at position 1 (dims counted from 0), giving 2*1*3
print e

f = a.unsqueeze(-2)   # -2 means the second-to-last dimension
print f

g = a.view(1,1,1,2,3)
print g.squeeze(0)   # squeeze out the size-1 dimension 0, giving 1*1*2*3

print g.squeeze()  # squeeze out all size-1 dimensions, giving 2*3

# resize_ is another way to adjust the size, but unlike view it can change the total number of elements in the tensor
a = torch.ones(2,3)
print a.resize_(3,3)
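
A quick self-contained contrast between view and resize_: view must preserve the total number of elements, while resize_ can grow or shrink the tensor, leaving any newly allocated entries uninitialized.

#coding=utf-8
import torch

a = torch.ones(2,3)
print a.view(3,2)     # OK: still 6 elements, just a different shape
# a.view(3,3)         # would raise an error: 9 elements != 6 elements
print a.resize_(3,3)  # OK: the 3 extra entries contain arbitrary (uninitialized) values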

3. Element-wise operations

#coding=utf-8
import torch


# element-wise operations; the output has the same shape as the input

a = torch.arange(0,6).view(2,3)
print a
print torch.cos(a)

print a % 3

print a **2

print torch.clamp(a,min=3)  # values smaller than 3 become 3

4. A simple fully connected layer

#coding=utf-8
import torch
from torch.autograd import Variable
from torch import nn
'''
Implementation of a fully connected layer
'''

class Liner(nn.Module):
    def __init__(self,in_features,out_features):
        super(Liner,self).__init__()
        self.w = nn.Parameter(torch.randn(in_features,out_features))
        self.b = nn.Parameter(torch.randn(out_features))

    def forward(self, x):
        x = x.mm(self.w)
        print 'x.size:',x.size()  #x.size: (2L, 3L)
        b1 = self.b.expand_as(x)  # expand_as(x) broadcasts b to the shape of x; here the two rows of b1 are identical
        print 'b1:',b1
        return x + b1

layer = Liner(4,3)
input = Variable(torch.randn(2,4))
output = layer(input)
print output  #x:2*4  w:4*3  xw:2*3     b:3    xw+b:2*3
for name,parameter in layer.named_parameters():
    print 'name:',name
    print 'parameter',parameter
    

Result:

x.size: (2L, 3L)
b1: Variable containing:
-0.3479  0.3417 -2.1922
-0.3479  0.3417 -2.1922
[torch.FloatTensor of size 2x3]

Variable containing:
-1.2731 -1.0239 -2.8525
 1.4926  1.3509 -2.7498
[torch.FloatTensor of size 2x3]

name: w
parameter Parameter containing:
 0.7866  2.3712  0.8656
 0.0616  0.7724 -0.4678
 0.0796  0.0966  0.5060
-0.8379 -0.0151  0.5166
[torch.FloatTensor of size 4x3]

name: b
parameter Parameter containing:
-0.3479
 0.3417
-2.1922
[torch.FloatTensor of size 3]

5. Multi-layer perceptron

#coding=utf-8
import torch
from torch.autograd import Variable
from torch import nn
'''
Implementation of a multi-layer perceptron
'''

class Perception(nn.Module):
    def __init__(self,in_features,hidden_features,out_features):
        super(Perception,self).__init__()
        self.layer1 = nn.Linear(in_features,hidden_features)
        self.layer2 = nn.Linear(hidden_features,out_features)

    def forward(self,x):
        x = self.layer1(x)
        x = torch.sigmoid(x)
        return self.layer2(x)  # the output of layer2 is not passed through sigmoid

perception = Perception(3,4,1)

input = Variable(torch.randn(2,3))
output = perception(input)
print 'output:',output

print '----'*10

for name, param in perception.named_parameters():
    print name,param

Result:

output: Variable containing:
-0.7951
-0.8791
[torch.FloatTensor of size 2x1]

----------------------------------------
layer1.weight Parameter containing:
-0.5728 -0.3069 -0.1073
-0.2374  0.1861 -0.3621
 0.1954  0.4269  0.4780
 0.1215  0.2511  0.2234
[torch.FloatTensor of size 4x3]

layer1.bias Parameter containing:
 0.1884
 0.5125
-0.1048
 0.0642
[torch.FloatTensor of size 4]

layer2.weight Parameter containing:
-0.2444  0.0377 -0.2948 -0.3397
[torch.FloatTensor of size 1x4]

layer2.bias Parameter containing:
-0.3831
[torch.FloatTensor of size 1]

6. Image-related layers. These mainly include convolution layers (Conv) and pooling layers (Pool). In practice they come in 1D, 2D, and 3D variants, and pooling further splits into average pooling (AvgPool), max pooling (MaxPool), adaptive pooling (AdaptivePool), and so on. Besides the usual forward convolution there is also transposed convolution (TransposeConv).
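
A minimal usage sketch of these layers (the channel counts, kernel sizes, and input shape below are arbitrary choices for illustration, not taken from the original post):

#coding=utf-8
import torch
from torch.autograd import Variable
from torch import nn

input = Variable(torch.randn(1,3,32,32))  # a batch of one 3-channel 32x32 image

conv   = nn.Conv2d(3,16,kernel_size=3,padding=1)          # 2D convolution: 3 -> 16 channels, same spatial size
pool   = nn.MaxPool2d(2)                                  # 2x2 max pooling
avg    = nn.AvgPool2d(2)                                  # 2x2 average pooling
adapt  = nn.AdaptiveMaxPool2d(1)                          # adaptive pooling to a 1x1 output
deconv = nn.ConvTranspose2d(16,3,kernel_size=2,stride=2)  # transposed convolution (upsampling)

x = conv(input)          # 1*16*32*32
print pool(x).size()     # 1*16*16*16
print avg(x).size()      # 1*16*16*16
print adapt(x).size()    # 1*16*1*1
print deconv(x).size()   # 1*3*64*64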

7. Quickly building a network

net2 = torch.nn.Sequential(
    torch.nn.Linear(2,10),
    torch.nn.ReLU(),
    torch.nn.Linear(10,2)
)

print 'net2:',net2

Output:

net2: Sequential (
  (0): Linear (2 -> 10)
  (1): ReLU ()
  (2): Linear (10 -> 2)
)

8. Saving and loading a network or its parameters

# coding=utf-8
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F

'''
Saving and loading
'''

x = torch.unsqueeze(torch.linspace(-1,1,100),dim = 1)
y = x.pow(2) + 0.2*torch.rand(x.size())
print 'x:',x
print 'y:',y
x,y = Variable(x),Variable(y)

# quickly build the network with Sequential

net1 = torch.nn.Sequential(
    torch.nn.Linear(1,10),
    torch.nn.ReLU(),
    torch.nn.Linear(10,1))
optimizer = torch.optim.SGD(net1.parameters(),lr = 0.02)
loss_func = torch.nn.MSELoss()

for t in range(100):
    prediction = net1(x)
    loss = loss_func(prediction,y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# two ways to save
torch.save(net1,'net.pkl')  # save the entire network

torch.save(net1.state_dict(),'net_params.pkl')  # save only the parameters: faster and uses less memory

# load the network (for saving method 1)
net2 = torch.load('net.pkl')
prediction = net2(x)
print prediction

# load the parameters (for saving method 2): rebuild the same architecture, then load the state dict
net3 = torch.nn.Sequential(
    torch.nn.Linear(1,10),
    torch.nn.ReLU(),
    torch.nn.Linear(10,1))

net3.load_state_dict(torch.load('net_params.pkl'))
prediction = net3(x)

9. Batch processing of data

# coding=utf-8
import torch
import torch.utils.data as Data
'''
Batch processing
'''

BATCH_SIZE = 5
x = torch.linspace(1,10,10)
y = torch.linspace(10,1,10)

# first wrap the tensors in a Dataset that torch can work with
torch_dataset = Data.TensorDataset(data_tensor=x,target_tensor=y)

# put the dataset into a DataLoader, which wraps the data for mini-batch training
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=BATCH_SIZE,  # mini-batch size
    shuffle=True,   # whether to shuffle the data; shuffling is usually better
    num_workers=2,   # number of worker processes for loading the data
)

for epoch in range(3):
    for step,(batch_x,batch_y) in enumerate(loader):
        print ('Epoch:',epoch,'| Step:',step,'| batch x:',
        batch_x.numpy(),'| batch y:',batch_y.numpy())

Output:

('Epoch:', 0, '| Step:', 0, '| batch x:', array([ 7., 10.,  5.,  9.,  1.]), '| batch y:', array([ 4.,  1.,  6.,  2., 10.]))
('Epoch:', 0, '| Step:', 1, '| batch x:', array([8., 4., 2., 6., 3.]), '| batch y:', array([3., 7., 9., 5., 8.]))
('Epoch:', 1, '| Step:', 0, '| batch x:', array([7., 9., 4., 2., 8.]), '| batch y:', array([4., 2., 7., 9., 3.]))
('Epoch:', 1, '| Step:', 1, '| batch x:', array([ 1.,  3.,  5.,  6., 10.]), '| batch y:', array([10.,  8.,  6.,  5.,  1.]))
('Epoch:', 2, '| Step:', 0, '| batch x:', array([5., 7., 6., 1., 4.]), '| batch y:', array([ 6.,  4.,  5., 10.,  7.]))
('Epoch:', 2, '| Step:', 1, '| batch x:', array([ 8.,  3.,  9.,  2., 10.]), '| batch y:', array([3., 8., 2., 9., 1.]))

Reprinted from blog.csdn.net/nanxiaoting/article/details/81070336