PyTorch (1): Installation and Basic Functions

PyTorch study notes.
Covers an introduction to neural networks, PyTorch installation, and basic functions.

PyTorch Installation

Official PyTorch site: https://pytorch.org/ (2020/6/22)

conda install pytorch torchvision cudatoolkit=10.2 -c pytorch
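
After installing, a quick sanity check confirms the install and whether CUDA is visible (a minimal sketch; the exact version string depends on your environment):

import torch

print(torch.__version__)          # e.g. 1.5.x for a mid-2020 install
print(torch.cuda.is_available())  # True if a compatible GPU and driver are visible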

Torch vs Numpy

torch ≈ numpy for neural networks

import torch
import numpy as np

np_data = np.arange(6).reshape((2, 3))
torch_data = torch.from_numpy(np_data)  # numpy -> torch
tensor2array = torch_data.numpy()       # torch -> numpy
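
Note that torch.from_numpy() and .numpy() share the same underlying memory, so an in-place change on one side shows up on the other:

np_data[0, 0] = 99
print(torch_data[0, 0])  # tensor(99) -- same buffer as np_data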

# abs
data = [-1, -2, 1, 2]
tensor = torch.FloatTensor(data)  # 32-bit floating point
print(
    '\nabs',
    '\nnumpy: ', np.abs(data),          # [1 2 1 2]
    '\ntorch: ', torch.abs(tensor)      # [1 2 1 2]
)

# sin
print(
    '\nsin',
    '\nnumpy: ', np.sin(data),      # [-0.84147098 -0.90929743  0.84147098  0.90929743]
    '\ntorch: ', torch.sin(tensor)  # [-0.8415 -0.9093  0.8415  0.9093]
)

# mean
print(
    '\nmean',
    '\nnumpy: ', np.mean(data),         # 0.0
    '\ntorch: ', torch.mean(tensor)     # 0.0
)
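
Note that torch.mean() returns a zero-dimensional tensor; .item() extracts a plain Python number:

print(torch.mean(tensor).item())  # 0.0 as a Python float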

# matrix multiplication
data = [[1,2], [3,4]]
tensor = torch.FloatTensor(data)  # 32-bit floating point
# correct method
print(
    '\nmatrix multiplication (matmul)',
    '\nnumpy: ', np.matmul(data, data),     # [[7, 10], [15, 22]]
    '\ntorch: ', torch.mm(tensor, tensor)   # [[7, 10], [15, 22]]
)
# incorrect method
data = np.array(data)
print(
    '\nmatrix multiplication (dot)',
    '\nnumpy: ', data.dot(data),        # [[7, 10], [15, 22]]
    # newer versions of torch raise an error here, since tensor.dot() only accepts 1-D tensors
    '\ntorch: ', tensor.dot(tensor)     # old torch flattened this to [1, 2, 3, 4] and returned 30.0,
                                        # i.e. the element-wise products of the flattened matrices, summed
)
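
In current PyTorch the idiomatic call is torch.matmul() (or the @ operator), which handles 2-D matrix products directly, while dot() is reserved for 1-D tensors:

print(torch.matmul(tensor, tensor))                   # [[7., 10.], [15., 22.]]
print(tensor @ tensor)                                # same result via the @ operator
print(torch.dot(tensor.flatten(), tensor.flatten()))  # tensor(30.) -- 1-D dot product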
torch.cat((A, B), dim)  # dim=0: concatenate vertically (stack rows); dim=1: concatenate horizontally (side by side)
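
For example (A and B here are placeholder 2x2 tensors):

A = torch.ones(2, 2)
B = torch.zeros(2, 2)
print(torch.cat((A, B), dim=0).shape)  # torch.Size([4, 2]) -- stacked vertically
print(torch.cat((A, B), dim=1).shape)  # torch.Size([2, 4]) -- stacked horizontally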

torch.normal(mean, std, out=None)
# mean (Tensor) – per-element means of the normal distributions
# std (Tensor) – per-element standard deviations
# out (Tensor, optional) – output tensor
torch.ones(*sizes, out=None) → Tensor
torch.zeros(*sizes, out=None) → Tensor
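
A common use is generating fake training data, e.g. two point clusters for a toy classifier (the shapes here are illustrative):

x0 = torch.normal(2 * torch.ones(100, 2), 1)        # 100 points clustered around (2, 2), std 1
x1 = torch.normal(-2 * torch.ones(100, 2), 1)       # 100 points clustered around (-2, -2), std 1
y = torch.cat((torch.zeros(100), torch.ones(100)))  # class labels 0 and 1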

values, indices = torch.max(input, dim)
# input is typically a tensor of scores, e.g. the output of a softmax
# dim is the dimension to reduce: 0 takes the max of each column, 1 the max of each row
# The call returns two tensors: the first holds the maximum values along dim,
# the second holds the indices of those maxima (often used as the predicted class labels).
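
For instance, taking the row-wise max of a small batch of scores:

scores = torch.tensor([[0.1, 0.7, 0.2],
                       [0.8, 0.1, 0.1]])
values, indices = torch.max(scores, dim=1)
print(values)   # tensor([0.7000, 0.8000])
print(indices)  # tensor([1, 0]) -- index of the max in each row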

torch.nn.functional.softmax(input, dim)
# Applies the Softmax function to an n-dimensional input tensor, rescaling the elements
# along dim so that they lie in (0, 1) and sum to 1.
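
A quick check that each row sums to 1:

import torch.nn.functional as F

logits = torch.tensor([[1.0, 2.0, 3.0]])
probs = F.softmax(logits, dim=1)
print(probs)        # tensor([[0.0900, 0.2447, 0.6652]])
print(probs.sum())  # tensor(1.)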

Variables (Variable)

A Variable is a node in the neural network's computation graph; its gradients flow backward through the graph so the network parameters can be updated by error backpropagation. (Since PyTorch 0.4, Variable has been merged into Tensor: a plain tensor created with requires_grad=True behaves the same way.)

import torch
from torch.autograd import Variable

tensor = torch.FloatTensor([[1, 2], [3, 4]])     # build a tensor
variable = Variable(tensor, requires_grad=True)  # build a variable, usually to compute gradients

v_out = torch.mean(variable * variable)  # mean of x^2
v_out.backward()                         # backpropagate from v_out

# attributes of variable:
print(variable.grad)
'''
 0.5000  1.0000
 1.5000  2.0000
'''
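
The values check out: v_out = (1/4) * sum(x_i^2), so d(v_out)/d(x_i) = x_i / 2, i.e. half of each entry of variable.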

print(variable)     # this is data in variable format
"""
Variable containing:
 1  2
 3  4
[torch.FloatTensor of size 2x2]
"""

print(variable.data)    # this is data in tensor format
"""
 1  2
 3  4
[torch.FloatTensor of size 2x2]
"""

print(variable.data.numpy())    # numpy format
"""
[[ 1.  2.]
 [ 3.  4.]]
"""

Activation Functions

relu, sigmoid, tanh

Or create your own activation function (it must be differentiable).
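
A minimal sketch of a custom activation (swish is an illustrative choice, not from the original notes); any differentiable composition of tensor ops works, since autograd differentiates it automatically:

def swish(x):
    # x * sigmoid(x): smooth and differentiable, so backward() flows through it
    return x * torch.sigmoid(x)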

import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# fake data
x = torch.linspace(-5, 5, 200)  # x data (tensor), shape=(200,)
x = Variable(x)
x_np = x.data.numpy()   # numpy array for plotting

# following are popular activation functions
y_relu = torch.relu(x).data.numpy()
y_sigmoid = torch.sigmoid(x).data.numpy()
y_tanh = torch.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()  # softplus lives in torch.nn.functional, not the torch namespace
# y_softmax = torch.softmax(x, dim=0).data.numpy()  # softmax is a special activation: it outputs probabilities

# plt to visualize these activation function
plt.figure(1, figsize=(8, 6))
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim((-1, 5))
plt.legend(loc='best')

plt.subplot(222)
plt.plot(x_np, y_sigmoid, c='red', label='sigmoid')
plt.ylim((-0.2, 1.2))
plt.legend(loc='best')

plt.subplot(223)
plt.plot(x_np, y_tanh, c='red', label='tanh')
plt.ylim((-1.2, 1.2))
plt.legend(loc='best')

plt.subplot(224)
plt.plot(x_np, y_softplus, c='red', label='softplus')
plt.ylim((-0.2, 6))
plt.legend(loc='best')

plt.show()

Output: a 2x2 grid of plots showing the relu, sigmoid, tanh, and softplus curves.

Reposted from blog.csdn.net/qq_40326280/article/details/112297711