PyTorch API Notes

1. Network Structure

torch.nn.ReflectionPad2d()

ReflectionPad2d(t) is equivalent to ReflectionPad2d((t, t, t, t)); the four values give the padding amounts for the left, right, top, and bottom edges.

m = nn.ReflectionPad2d((1, 2, 0, 3))
t = torch.arange(1, 17, dtype=torch.float).reshape(1, 1, 4, 4)
print(m(t))
tensor([[[[ 2.,  1.,  2.,  3.,  4.,  3.,  2.],
          [ 6.,  5.,  6.,  7.,  8.,  7.,  6.],
          [10.,  9., 10., 11., 12., 11., 10.],
          [14., 13., 14., 15., 16., 15., 14.],
          [10.,  9., 10., 11., 12., 11., 10.],
          [ 6.,  5.,  6.,  7.,  8.,  7.,  6.],
          [ 2.,  1.,  2.,  3.,  4.,  3.,  2.]]]])
  • The padded values mirror the input about each border, starting from the element next to the edge (the edge value itself is not repeated).
  • Each padding amount must be smaller than the size of the corresponding input dimension.
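
As a quick check of the single-argument form mentioned above (a minimal sketch, input values chosen arbitrarily):

import torch
import torch.nn as nn

x = torch.arange(1, 10, dtype=torch.float).reshape(1, 1, 3, 3)
# A single int pads all four sides by the same amount
print(torch.equal(nn.ReflectionPad2d(1)(x), nn.ReflectionPad2d((1, 1, 1, 1))(x)))  # True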

2. Loss Functions

torch.nn.TripletMarginLoss()

torch.nn.TripletMarginLoss(margin=1.0, p=2.0, eps=1e-06, 
						   swap=False, size_average=None, 
						   reduce=None, reduction='mean')

The inputs are three tensors: a (anchor), p (positive), and n (negative), each of shape (N, D).

margin        (float,  optional) – Default: 1.0     the loss becomes 0 once d(a, n) exceeds d(a, p) by at least margin
p             (int,    optional) – Default: 2       the norm degree p used for the pairwise distance
swap          (bool,   optional) – Default: False   see the paper "Learning shallow convolutional feature descriptors with triplet losses"
size_average  (bool,   optional) – Default: True    deprecated
reduce        (bool,   optional) – Default: True    deprecated
reduction     (string, optional) – Default: 'mean'  one of 'none', 'mean', 'sum'

The formula is given below; the effect is to shrink the anchor-positive distance and enlarge the anchor-negative distance.

L(a, p, n) = \max\{d(a_i, p_i) - d(a_i, n_i) + \mathrm{margin},\ 0\}
d(x_i, y_i) = \lVert \mathbf{x}_i - \mathbf{y}_i \rVert_p
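
A minimal usage sketch (the batch size, embedding dimension, and margin below are illustrative):

import torch
import torch.nn as nn

triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)
anchor = torch.randn(8, 128, requires_grad=True)    # (N, D) anchor embeddings
positive = torch.randn(8, 128, requires_grad=True)  # same class as anchor
negative = torch.randn(8, 128, requires_grad=True)  # different class
loss = triplet_loss(anchor, positive, negative)
loss.backward()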

3. Tensor Operations

tensor.permute()

Permutes the dimensions of a tensor.
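
A small sketch (shapes chosen arbitrarily):

import torch

x = torch.randn(2, 3, 4)
# Move the last dimension to the front, e.g. (H, W, C) -> (C, H, W)
print(x.permute(2, 0, 1).shape)  # torch.Size([4, 2, 3])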

torch.unsqueeze()

torch.unsqueeze(input, dim)
Inserts a dimension of size 1 at position dim.

torch.squeeze()

torch.squeeze(input, dim)
Removes dimensions of size 1; if dim is given, only that dimension is squeezed (and only if its size is 1).
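
A quick sketch of both calls (example shapes are my own):

import torch

x = torch.zeros(3, 4)
print(torch.unsqueeze(x, 0).shape)  # torch.Size([1, 3, 4])

y = torch.zeros(1, 3, 1, 4)
print(torch.squeeze(y).shape)       # torch.Size([3, 4])  all size-1 dims removed
print(torch.squeeze(y, 2).shape)    # torch.Size([1, 3, 4])  only dim 2 removed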

tensor.narrow()

tensor.narrow(dimension, start, length)
Extracts values from the tensor along dimension dim, from index start up to start + length (start + length not included).

x = torch.arange(1, 10, dtype=torch.float).reshape(3, 3)
print(x)
print(x.narrow(0,1,2))
print(x.narrow(1,0,2))
tensor([[1., 2., 3.],
        [4., 5., 6.],
        [7., 8., 9.]])
        
tensor([[4., 5., 6.],
        [7., 8., 9.]])
        
tensor([[1., 2.],
        [4., 5.],
        [7., 8.]])

torch.gather()

torch.gather(input, dim, index, *, sparse_grad=False, out=None)

Gathers values from input along dimension dim at the indices given by index.

import numpy as np
import torch

a = np.random.randint(0,10,[5,5])
a = torch.tensor(a)
idx1 = torch.tensor([[1,2,3,4,0]])
idx2 = torch.tensor([[1],[2],[3],[4],[0]])

out1 = torch.gather(a, 0, idx1)
out2 = torch.gather(a, 1, idx2)
print(a)
print(out1)
print(out2)
tensor([[9, 1, 5, 5, 5],
        [0, 2, 3, 3, 7],
        [6, 8, 2, 1, 9],
        [6, 1, 5, 4, 6],
        [7, 4, 7, 9, 7]], dtype=torch.int32)
tensor([[0, 8, 5, 9, 5]], dtype=torch.int32)
tensor([[1], [3], [1], [6], [7]], dtype=torch.int32)

torch.nonzero()

torch.nonzero(input, *, out=None, as_tuple=False)
Returns the indices of the non-zero elements of input.

import torch

a = torch.tensor([[[0.6, 0.0], [0.4, 0.0], [0.8, 0.0]],
                  [[0.0, 0.4], [0.0, 0.0], [0.3, 0.2]],
                  [[0.0, 0.0], [0.0, 0.0], [0.0,-0.3]],
                  [[0.5, 0.9], [0.0,-0.4], [0.0, 0.8]]])

print(torch.nonzero(a))
tensor([[0, 0, 0], [0, 1, 0], [0, 2, 0], [1, 0, 1], [1, 2, 0], [1, 2, 1],
        [2, 2, 1], [3, 0, 0], [3, 0, 1], [3, 1, 1], [3, 2, 1]])

It is often used together with a threshold condition:

print(torch.nonzero(a>0.6))

print(torch.nonzero(a>0.6, as_tuple=True))
print(a[torch.nonzero(a>0.6, as_tuple=True)])
tensor([[0, 2, 0],
        [3, 0, 1],
        [3, 2, 1]])
        
(tensor([0, 3, 3]), tensor([2, 0, 2]), tensor([0, 1, 1]))

tensor([0.8000, 0.9000, 0.8000])

torch.flip()

torch.flip(input, dims)
Reverses the order of input along the dimensions given in dims.

x = torch.arange(24).view(2, 3, 4)

tensor([[[ 0,  1,  2,  3], [ 4,  5,  6,  7], [ 8,  9, 10, 11]],
        [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]])
torch.flip(x, [0])
tensor([[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]],
        [[ 0,  1,  2,  3], [ 4,  5,  6,  7], [ 8,  9, 10, 11]]])

torch.flip(x, [1])
tensor([[[ 8,  9, 10, 11], [ 4,  5,  6,  7], [ 0,  1,  2,  3]],
        [[20, 21, 22, 23], [16, 17, 18, 19], [12, 13, 14, 15]]])

torch.flip(x, [2])
tensor([[[ 3,  2,  1,  0], [ 7,  6,  5,  4], [11, 10,  9,  8]],
        [[15, 14, 13, 12], [19, 18, 17, 16], [23, 22, 21, 20]]])

torch.flip(x, [1, 2])
tensor([[[11, 10,  9,  8], [ 7,  6,  5,  4], [ 3,  2,  1,  0]],
        [[23, 22, 21, 20], [19, 18, 17, 16], [15, 14, 13, 12]]])

torch.where()

torch.where(condition, x, y)
Merges x and y element-wise according to condition:
\text{out}_i = \begin{cases} x_i & \text{if } \text{condition}_i \\ y_i & \text{otherwise} \end{cases}
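
A minimal sketch (values chosen for illustration):

import torch

x = torch.tensor([-1.0, 2.0, -3.0])
y = torch.zeros(3)
print(torch.where(x > 0, x, y))  # tensor([0., 2., 0.])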

Reposted from blog.csdn.net/weixin_43605641/article/details/111224016