《深度学习之PyTorch实战计算机视觉》学习笔记(2)

这部分是关于 PyTorch 的基本知识。代码基于 Python 3.7、PyTorch 1.0、CUDA 10.0。

import torch

# Legacy type-named constructors (torch.FloatTensor / torch.IntTensor) are
# discouraged since PyTorch 0.4.  The modern API separates the two intents:
#   torch.empty(shape, dtype=...)  -> uninitialized storage of a given shape
#   torch.tensor(data, dtype=...)  -> tensor built from existing data
a = torch.empty(2, 3)                              # float32, UNINITIALIZED (arbitrary values)
b = torch.tensor([2, 3, 4, 5], dtype=torch.float32)
# print(a)
# print(b)
c = torch.empty(2, 3, dtype=torch.int32)           # int32, UNINITIALIZED (arbitrary values)
d = torch.tensor([2, 3, 4, 5], dtype=torch.int32)
print(c)
print(d)
tensor([[          0,           0,   183490976],
        [        652,           0, -2147483648]], dtype=torch.int32)
tensor([2, 3, 4, 5], dtype=torch.int32)
# Four common tensor factory functions.
e = torch.rand(2, 3)    # uniform samples on the interval [0, 1)
f = torch.randn(2, 3)   # samples from the standard normal distribution (mean 0, std 1)
# NOTE: with integer arguments torch.arange returns an int64 tensor (not a
# float one), and the range is half-open: [start, end) stepping by `step`.
g = torch.arange(start=1, end=20, step=1)
h = torch.zeros((2, 3))  # float32 tensor filled with zeros
for made in (e, f, g, h):
    print(made)

tensor([[0.1115, 0.5678, 0.2691],
        [0.5423, 0.1575, 0.1173]])
tensor([[ 0.9564, -2.1275,  0.6420],
        [ 1.1745,  0.4277, -1.3405]])
tensor([ 1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
        19])
tensor([[0., 0., 0.],
        [0., 0., 0.]])
# Element-wise arithmetic, clamping, and matrix products.
a = torch.randn(2, 3)
print(a)
b = torch.abs(a)          # element-wise absolute value
print(b)
# (the original created an unused `c = torch.randn(2, 3)` here — dead code removed;
#  the pasted output confirms d really is a + b, not a + c)
d = torch.add(a, b)       # element-wise tensor + tensor
print(d)
e = torch.add(d, 10)      # tensor + scalar (the scalar broadcasts)
print(e)
# torch.clamp limits every element to the CLOSED interval [-0.1, 0.1]
f = torch.clamp(d, -0.1, 0.1)
print(f)
g = torch.div(a, b)       # element-wise division
print(g)
h = torch.div(g, 2)       # division by a scalar
print(h)
j = torch.mul(g, h)       # element-wise multiplication
print(j)
k = torch.mul(j, 10)      # multiplication by a scalar
print(k)
l = torch.pow(j, 2)       # element-wise power
print(l)
q = torch.pow(k, l)       # element-wise k ** l
print(q)
w = torch.randn(2, 3)
print(w)
e = torch.randn(3, 2)     # NOTE: rebinds `e`; the (2,3) tensor above is discarded
print(e)
r = torch.mm(w, e)        # matrix-matrix product: (2,3) @ (3,2) -> (2,2); dims must be compatible
print(r)
t = torch.randn(3)
print(t)
y = torch.mv(w, t)        # matrix-vector product: (2,3) @ (3,) -> (2,); matrix first, vector second
print(y)
tensor([[ 0.9057,  0.0965,  0.0714],
        [ 1.4697,  0.2340, -0.0850]])
tensor([[0.9057, 0.0965, 0.0714],
        [1.4697, 0.2340, 0.0850]])
tensor([[1.8114, 0.1931, 0.1427],
        [2.9393, 0.4679, 0.0000]])
tensor([[11.8114, 10.1931, 10.1427],
        [12.9393, 10.4679, 10.0000]])
tensor([[0.1000, 0.1000, 0.1000],
        [0.1000, 0.1000, 0.0000]])
tensor([[ 1.,  1.,  1.],
        [ 1.,  1., -1.]])
tensor([[ 0.5000,  0.5000,  0.5000],
        [ 0.5000,  0.5000, -0.5000]])
tensor([[0.5000, 0.5000, 0.5000],
        [0.5000, 0.5000, 0.5000]])
tensor([[5., 5., 5.],
        [5., 5., 5.]])
tensor([[0.2500, 0.2500, 0.2500],
        [0.2500, 0.2500, 0.2500]])
tensor([[1.4953, 1.4953, 1.4953],
        [1.4953, 1.4953, 1.4953]])
tensor([[-0.3443,  0.4097, -1.6661],
        [-0.4750,  1.8257, -0.8965]])
tensor([[ 1.0432, -1.5905],
        [-1.0393, -0.1865],
        [-0.6160, -1.6509]])
tensor([[ 0.2414,  3.2218],
        [-1.8406,  1.8952]])
tensor([ 0.2252, -1.9952, -0.4342])
tensor([-0.1716, -3.3604])

猜你喜欢

转载自blog.csdn.net/weixin_40017911/article/details/89014336