PyTorch Lesson 2: Math Operations in package-torch (2)

Copyright notice: this is an original article by Wang Xiaocao (王小草); please contact the author before reposting. https://blog.csdn.net/sinat_33761963/article/details/84502938

Weibo: https://weibo.com/wangxiaocaoai/profile?rightmod=1&wvr=6&mod=personinfo
WeChat official account: search for "AI躁动街"


Key points of this section:

1 Pointwise Operations

2 Reduction Operations

3 Comparison Operations

4 Other Operations

5 Linear Algebra Operations

1 Pointwise Operations

# Import the package
import torch
# 1. Absolute value
a = torch.FloatTensor([-1, -10])
a_abs = torch.abs(a)
print(a_abs)
tensor([  1.,  10.])
# 2. Cosine
a = torch.randn(4)
a_cos = torch.cos(a)
print(a_cos)
tensor([ 0.3623,  0.0784,  0.9808,  0.8221])
# 3. Hyperbolic cosine
a = torch.randn(4)
a_cosh = torch.cosh(a)
print(a_cosh)
tensor([ 2.2139,  1.0241,  1.0118,  1.2354])
# 4. Arccosine
a = torch.randn(4)
a_acos = torch.acos(a)
print(a_acos)
tensor([ 2.1772,  0.9836,  2.2749,  0.5994])
# 5. Hyperbolic sine
a = torch.randn(4)
a_sinh = torch.sinh(a)
print(a_sinh)
tensor([-0.9122,  0.2392,  1.2656,  0.4663])
# 6. Sine
a = torch.randn(4)
a_sin = torch.sin(a)
print(a_sin)
# 7. Arcsine (inputs outside [-1, 1] produce nan)
a = torch.randn(4)
a_asin = torch.asin(a)
print(a_asin)
tensor([-0.9362,     nan, -1.1308,  0.0407])
# 8. Tangent
a = torch.randn(4)
tan = torch.tan(a)
print(tan)
tensor([  0.0061,  -0.9945, -17.3586,  -0.3077])
# 9. Hyperbolic tangent
a = torch.randn(4)
tanh = torch.tanh(a)
print(tanh)
tensor([ 0.9856, -0.8689, -0.6669,  0.4711])
# 10. Arctangent of a tensor
a = torch.randn(4)
atan = torch.atan(a)
print(atan)
tensor([ 0.3666,  1.0404, -0.5340,  0.0825])
# 11. Arctangent of two tensors
a = torch.randn(4)
b = torch.randn(4)
atan2 = torch.atan2(a, b)
print(atan2)
tensor([ 0.7203, -0.7093, -2.4139, -0.9148])
# 12. Addition
# Direct addition: a + b
a = torch.randn(4)
b = 20
add = torch.add(a, b)
print(add)

# a + b * c (legacy torch.add(input, value, other) form)
c = torch.rand(4)
add2 = torch.add(a, b, c)
print(add2)
tensor([ 20.6347,  19.7540,  21.7134,  20.2289])
tensor([ 15.7686,   8.5032,   4.6279,   5.1963])
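Note that torch.add(a, b, c) above uses the legacy positional-scalar form torch.add(input, value, other). A minimal sketch of the same computation in the newer keyword style, assuming a PyTorch version that accepts the alpha keyword:
# Sketch: newer PyTorch writes a + b * c with the alpha keyword
add2_new = torch.add(a, c, alpha=b)
print(add2_new)  # same values as add2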
# 13. Divide then add: t + 0.5 * (t1 / t2)
t = torch.randn(2, 3)
t1 = torch.randn(2, 3)
t2 = torch.randn(2, 3)
addcdiv = torch.addcdiv(t, 0.5, t1, t2)
print(addcdiv)  
tensor([[-0.9947, -1.5404, -0.5799],
        [-1.5395,  3.4531,  1.6741]])
# 14. Multiply then add: t + 0.5 * (t1 * t2)
addcmul = torch.addcmul(t, 0.5, t1, t2)
print(addcmul)
tensor([[-0.9904, -1.3817, -0.3505],
        [-1.2655,  3.0514,  1.3952]])
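The 0.5 passed positionally to addcdiv and addcmul is the same legacy scalar convention; a sketch of the newer keyword form, assuming a version that accepts value=:
# Sketch: newer PyTorch passes the scalar as the value keyword
addcdiv_new = torch.addcdiv(t, t1, t2, value=0.5)
addcmul_new = torch.addcmul(t, t1, t2, value=0.5)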
# 15. Multiplication
# Tensor times a scalar
a = torch.randn(2, 3)
a_mul = torch.mul(a, 5)
print('a:', a)
print('a_mul:', a_mul)

# Tensor times a tensor: element-wise multiplication
b = torch.randn(2, 3)
a_b_mul = torch.mul(a, b)
print('b:', b)
print('a_b_mul:', a_b_mul)
a: tensor([[ 0.0863,  1.7408,  0.8538],
        [ 0.8702,  0.1472,  0.2192]])
a_mul: tensor([[ 0.4315,  8.7038,  4.2688],
        [ 4.3508,  0.7359,  1.0962]])
b: tensor([[ 0.5903, -0.6919, -0.4070],
        [-0.3127,  0.1756,  0.9016]])
a_b_mul: tensor([[ 0.0510, -1.2044, -0.3474],
        [-0.2721,  0.0258,  0.1977]])
# 16. Division
# Tensor divided by a scalar
a = torch.randn(1,4)
a_div = torch.div(a, 2)  # a / 2
print(a_div)
tensor([[ 0.4224,  0.7171, -0.4719, -0.1562]])
# 17. Tensor divided by a tensor, element-wise
a = torch.randn(1, 4)
b = torch.randn(1, 4)
div = torch.div(a, b)
print(div)
tensor([[ 2.2698,  0.7206,  1.0432,  0.2880]])
# 18. Remainder of division; the result has the same sign as the dividend.
a = torch.tensor([-1, 2, 3, 4])
a_fmod = torch.fmod(a, 2) # a%2
print(a_fmod)
tensor([-1,  0,  1,  0])
# 19. Remainder of division; the result has the same sign as the divisor.
a = torch.tensor([-1, 2, 3, 4])
a_re = torch.remainder(a, 2) # a%2
print(a_re)
tensor([ 1,  0,  1,  0])
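The difference between fmod and remainder only appears with negative operands; here is a small sketch contrasting the two on a negative divisor:
# fmod: the result takes the sign of the dividend
# remainder: the result takes the sign of the divisor
x = torch.tensor([-3, 3])
print(torch.fmod(x, -2))       # tensor([-1,  1])
print(torch.remainder(x, -2))  # tensor([-1, -1])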
# 20. Exponential
a = torch.randn(1, 4)
a_exp = torch.exp(a)
print(a_exp)
tensor([[ 0.1983,  3.7585,  1.6955,  3.2236]])
# 21. Natural logarithm (negative inputs produce nan)
a = torch.randn(1, 4)
a_log = torch.log(a)
print(a_log)
tensor([[-0.7030,     nan,     nan, -5.1565]])
# 22. Powers
# With a scalar exponent
a = torch.Tensor([1, 2, 3, 4])
a_pow = torch.pow(a, 3)
print(a)
print(a_pow)

# With a tensor exponent
exp = torch.Tensor([1, 2, 3, 4])
a_pow = torch.pow(a, exp)
print(a_pow)
tensor([ 1.,  2.,  3.,  4.])
tensor([  1.,   8.,  27.,  64.])
tensor([   1.,    4.,   27.,  256.])
# 23. Square root
a = torch.Tensor([1, 2, 3, 4])
a_sqrt = torch.sqrt(a)
print(a_sqrt)
tensor([ 1.0000,  1.4142,  1.7321,  2.0000])
# 24. Reciprocal of the square root
a = torch.Tensor([1, 2, 3, 4])
a_rsqrt = torch.rsqrt(a)
print(a_rsqrt)
tensor([ 1.0000,  0.7071,  0.5774,  0.5000])
# 25. Sigmoid
a = torch.randn(2, 2)
a_sig = torch.sigmoid(a)
print(a_sig)
tensor([[ 0.1865,  0.6050],
        [ 0.4330,  0.6606]])
# 26. Ceiling (round up)
a = torch.randn(2, 2)
a_ceil = torch.ceil(a)
print(a_ceil)
tensor([[-1., -0.],
        [-0.,  1.]])
# 27. Floor (round down)
a = torch.randn(2, 2)
a_floor = torch.floor(a)
print(a_floor)
tensor([[ 1., -1.],
        [ 0., -2.]])
# 28. Round to the nearest integer
a = torch.tensor([-1.5, 2.1, 3.9, 4.5])
a_round = torch.round(a)
print(a_round)
tensor([-2.,  2.,  4.,  4.])
# 29. Truncate: drop the fractional part
a = torch.tensor([-1.5, 2.1, 3.9, 4.5])
a_trunc = torch.trunc(a)
print(a_trunc)
tensor([-1.,  2.,  3.,  4.])
# 30. Return the fractional part
a = torch.Tensor([1, 1.5, 1.6, 2.9])
a_frac = torch.frac(a)
print(a_frac)
tensor([ 0.0000,  0.5000,  0.6000,  0.9000])
# 31. Negate
a = torch.randn(1, 4)
a_neg = torch.neg(a)
print(a)
print(a_neg)
tensor([[-0.0075, -1.8761,  0.6364,  0.8317]])
tensor([[ 0.0075,  1.8761, -0.6364, -0.8317]])
# 32. Reciprocal
a = torch.Tensor([1, 2, 3, 4])
a_r = torch.reciprocal(a)
print(a_r)
tensor([ 1.0000,  0.5000,  0.3333,  0.2500])
# 33. Sign
a = torch.randn(4)
a_sign = torch.sign(a)
print(a)
print(a_sign)
tensor([ 1.1125, -1.9569,  1.8638,  0.3364])
tensor([ 1., -1.,  1.,  1.])
# 34. Clamp to an interval
#        min, if x_i < min
# y_i =  x_i, if min <= x_i <= max
#        max, if x_i > max
    
a = torch.randn(1, 4)
print(a)
a_clamp = torch.clamp(a, min=-0.7, max=0.7)
print(a_clamp)
tensor([[-1.6682, -0.3986, -0.6452, -0.4119]])
tensor([[-0.7000, -0.3986, -0.6452, -0.4119]])
# min or max can also be given alone
a_clamp = torch.clamp(a, max=0.7)
print(a_clamp)

a_clamp = torch.clamp(a, min=-0.7)
print(a_clamp)
tensor([[-1.6682, -0.3986, -0.6452, -0.4119]])
tensor([[-0.7000, -0.3986, -0.6452, -0.4119]])
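For intuition, clamp is equivalent to an element-wise max with the lower bound followed by an element-wise min with the upper bound; a quick sketch, assuming torch.full_like is available:
# Sketch: clamp(a, min=lo, max=hi) == min(max(a, lo), hi) element-wise
manual = torch.min(torch.max(a, torch.full_like(a, -0.7)), torch.full_like(a, 0.7))
print(manual)  # matches torch.clamp(a, min=-0.7, max=0.7)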
# 35. Linear interpolation between two tensors
start = torch.arange(1, 5)
end = torch.Tensor(4).fill_(2)
a = torch.lerp(start, end, 0.5)
print(start)
print(end)
print(a)
tensor([ 1.,  2.,  3.,  4.])
tensor([ 2.,  2.,  2.,  2.])
tensor([ 1.5000,  2.0000,  2.5000,  3.0000])
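lerp computes start + weight * (end - start) element-wise, which is easy to verify by hand:
# Sketch: manual linear interpolation, matching torch.lerp above
manual = start + 0.5 * (end - start)
print(manual)  # tensor([ 1.5000,  2.0000,  2.5000,  3.0000])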

2 Reduction Operations

import torch 
# 1. Cumulative product along a given dimension
a = torch.randn(10)
print('a:', a)

a_cumprod = torch.cumprod(a, 0)
print(a_cumprod)  # note: the shape is unchanged; the values are cumulatively multiplied
a: tensor([ 2.4314, -0.3712, -1.3956, -0.7340,  0.6754,  0.9834, -0.3069,
         0.8109, -0.4161, -1.4032])
tensor([ 2.4314, -0.9026,  1.2596, -0.9246, -0.6244, -0.6141,  0.1885,
         0.1528, -0.0636,  0.0892])
# 2. Cumulative sum along a given dimension
a = torch.randn(10)
print('a:', a)

a_cumsum = torch.cumsum(a, 0)
print(a_cumsum)  # note: the shape is unchanged; the values are cumulatively summed
a: tensor([-0.0111,  0.3256, -0.4843, -0.5490,  0.7626,  0.6011, -0.8144,
        -0.4629,  0.4934, -0.6671])
tensor([-0.0111,  0.3145, -0.1698, -0.7188,  0.0438,  0.6449, -0.1694,
        -0.6323, -0.1389, -0.8060])
# 3. Return the p-norm of (input - other).
a = torch.randn(3)
b = torch.randn(3)
a_dist = torch.dist(a, b, 3)
print(a_dist)
tensor(3.2886)
# 4. Return the p-norm of the input tensor.
a = torch.randn(4, 5)
a_n = torch.norm(a, 2)
print(a_n)

# A dimension can be specified
a_n = torch.norm(a, 2, 1)
print(a_n)
tensor(4.2416)
tensor([ 2.5676,  1.8141,  1.5184,  2.4087])
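As a sanity check, the 2-norm with no dim argument is just the square root of the sum of squares over all elements:
# Sketch: manual 2-norm over the whole tensor
manual = torch.sqrt(torch.sum(a * a))
print(manual)  # matches torch.norm(a, 2)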
# 5. Return the mean of all elements of the input tensor.
a = torch.randn(3)
a_m = torch.mean(a)
print(a_m)

# Multi-dimensional input
a = torch.randn(3,3)
a_m = torch.mean(a)
print(a_m)

# Along a specified dimension
a = torch.randn(2,3)
a_m = torch.mean(a, 1)
print(a_m)
tensor(-0.1776)
tensor(1.00000e-02 *
       4.2739)
tensor([-0.9272,  0.3165])
# 6. Return the median of each row along the given dimension, together with a LongTensor containing the indices of the medians.
a = torch.randn(3)
a_m = torch.median(a)
print(a_m)

# Multi-dimensional input
a = torch.randn(2, 3)
a_m = torch.median(a)
print(a_m)

# Along a specified dimension
a = torch.randn(2, 3)
a_m = torch.median(a, 1)
print(a_m)
tensor(1.00000e-02 *
       -4.8281)
tensor(0.1583)
(tensor([ 0.4999, -0.3078]), tensor([ 2,  0]))
# 7. Return the product of all elements of the input tensor.
a = torch.randn(3)
a_p = torch.prod(a)
print(a_p)

# Multi-dimensional input
a = torch.randn(2, 3)
a_p = torch.prod(a)
print(a_p)

# Along a specified dimension
a = torch.randn(2, 3)
a_p = torch.prod(a, 1)
print(a_p)
tensor(0.5736)
tensor(1.00000e-03 *
       3.2077)
# 8. Return the standard deviation of all elements of the input tensor; the usage mirrors the above: 1-D, multi-dimensional, or along a specified dimension
a = torch.randn(2, 3)
a_s = torch.std(a)
print(a_s)

tensor(0.7310)
# 9. Return the variance of all elements of the input tensor; same usage as above
a = torch.randn(2, 3)
a_var = torch.var(a)
print(a_var)
tensor(1.7634)
# 10. Return the sum of all elements of the input tensor; same usage as above
a = torch.randn(2, 3)
a_s = torch.sum(a)
print(a_s)
tensor(-1.3865)
# 11. Return the mode of each row along the given dim, together with a LongTensor containing the indices of the modes. dim defaults to the last dimension of the input.
a = torch.randn(4, 5)
a_m = torch.mode(a)
print(a_m)

a = torch.randn(4, 5)
a_m = torch.mode(a, 0)
print(a_m)
(tensor([ 0.0055,  0.1009, -0.7734, -1.3452]), tensor([ 2,  2,  2,  0]))
(tensor([-0.0192, -0.9864, -1.0947, -0.8963, -0.4842]), tensor([ 2,  0,  3,  2,  3]))

3 Comparison Operations

import torch
# Compare whether two tensors are equal, element-wise
a = torch.Tensor([[1,2],[3,4]])
b = torch.Tensor([[1,2], [5,6]])
eq = torch.eq(a, b)
print(eq) # positions where the elements are equal output 1
tensor([[ 1,  1],
        [ 0,  0]], dtype=torch.uint8)
# Element-wise: is the first tensor greater than or equal to the second?
a = torch.Tensor([[1,2],[3,4]])
b = torch.Tensor([[1,2], [5,6]])
ge = torch.ge(a, b)
print(ge)
tensor([[ 1,  1],
        [ 0,  0]], dtype=torch.uint8)
# Element-wise: is the first tensor greater than the second?
a = torch.Tensor([[1,2],[3,4]])
b = torch.Tensor([[1,2], [5,6]])
gt = torch.gt(a, b)
print(gt)
tensor([[ 0,  0],
        [ 0,  0]], dtype=torch.uint8)
# Element-wise: is the first tensor less than or equal to the second?
a = torch.Tensor([[1,2],[3,4]])
b = torch.Tensor([[1,2], [5,6]])
le = torch.le(a, b)
print(le)
tensor([[ 1,  1],
        [ 1,  1]], dtype=torch.uint8)
# Element-wise: is the first tensor less than the second?
a = torch.Tensor([[1,2],[3,4]])
b = torch.Tensor([[1,2], [5,6]])
lt = torch.lt(a, b)
print(lt)
tensor([[ 0,  0],
        [ 1,  1]], dtype=torch.uint8)
# Take the k-th smallest value of input along the given dim. If dim is not specified, it defaults to the last dimension of input.
a = torch.arange(1, 6)
a_kth = torch.kthvalue(a, 3)
print(a_kth)
(tensor(3.), tensor(2))
# Return the k largest values of input along the given dim.
# If dim is not specified, it defaults to the last dimension.
# With largest=False, the k smallest values are returned instead.
a = torch.randn(6, 6)
a_kth = torch.topk(a, 3)
print(a_kth)

a_kth = torch.topk(a, k=3, dim=0, largest=False)
print(a_kth)
(tensor([[ 0.8220,  0.5333,  0.4727],
        [ 0.1302,  0.0687, -0.2509],
        [ 1.4422,  0.5440,  0.0836],
        [ 2.2213,  0.4498, -0.0526],
        [ 1.5333,  1.0802,  0.8530],
        [ 1.7859,  1.4408,  0.0719]]), tensor([[ 3,  5,  4],
        [ 1,  4,  2],
        [ 4,  3,  5],
        [ 5,  0,  2],
        [ 3,  4,  5],
        [ 5,  2,  3]]))
(tensor([[-1.6596, -0.7786, -1.0045, -1.1036, -1.6679, -0.5299],
        [-0.8876, -0.3890, -0.5012, -0.7678, -0.4822,  0.0836],
        [-0.4958, -0.1593, -0.2509,  0.0719,  0.0687,  0.5333]]), tensor([[ 5,  3,  0,  1,  3,  1],
        [ 2,  5,  4,  3,  5,  2],
        [ 1,  2,  1,  5,  1,  0]]))
# Return the maximum
a = torch.randn(2, 6)
a_max = torch.max(a)
print(a_max)

# A dimension can also be specified
a = torch.randn(2, 6)
a_max = torch.max(a, 1)
print(a_max)
tensor(2.5546)
(tensor([ 2.6702,  0.5046]), tensor([ 0,  4]))
# Likewise, return the minimum
a_min = torch.min(a)
print(a_min)
tensor(-2.9920)
# Element-wise comparison of input and other, i.e. input != other
a = torch.Tensor([1,2,3,4])
b = torch.Tensor([1,5,6,4])
ne = torch.ne(a, b)
print(ne)
tensor([ 0,  1,  1,  0], dtype=torch.uint8)
# Sorting
a = torch.randn(4,5)
a_sort, index = torch.sort(a)
print(a_sort) # two tensors are returned
print(index)
tensor([[-1.4826, -1.0823, -1.0085, -0.0007,  0.4789],
        [-0.8378, -0.5947,  1.3993,  1.6431,  2.9925],
        [-0.1516, -0.0506, -0.0131,  0.6707,  1.0870],
        [-0.7049, -0.5773,  0.2794,  0.3003,  0.6444]])
tensor([[ 1,  2,  0,  3,  4],
        [ 1,  0,  2,  3,  4],
        [ 3,  4,  1,  2,  0],
        [ 0,  4,  1,  2,  3]])
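sort also takes a descending flag (and a dim argument); a short sketch:
# Sketch: sort each row in descending order instead
a_desc, idx_desc = torch.sort(a, descending=True)
print(a_desc)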

4 Other Operations

# 1. Compute the cross product
# Returns the cross product of the two tensors input and other along dimension dim
# input and other must have the same shape, and their size along the given dim must be 3.
a = torch.randn(4, 3)
b = torch.randn(4, 3)
c = torch.cross(a, b, dim=1)
print(c)
tensor([[-0.4633, -1.8994, -1.1017],
        [ 1.0588, -0.8399, -0.9991],
        [ 0.1041, -0.8343, -0.6406],
        [ 1.1517,  0.5626, -2.9870]])
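A quick property check: each row of the result is orthogonal to the corresponding rows of a and b, so the row-wise dot products should be zero up to floating-point error:
# Sketch: row-wise dot products of c with a and b are ~0
print(torch.sum(a * c, dim=1))
print(torch.sum(b * c, dim=1))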
# 2. Get a diagonal matrix
# If the input is a vector (1-D tensor), return a 2-D square matrix with input as its diagonal
a = torch.randn(3)
print(a)
a_diag = torch.diag(a)
print(a_diag)
tensor([ 0.1308,  1.4108,  0.2051])
tensor([[ 0.1308,  0.0000,  0.0000],
        [ 0.0000,  1.4108,  0.0000],
        [ 0.0000,  0.0000,  0.2051]])
# If the input is a matrix (2-D tensor), return a 1-D tensor containing its diagonal elements
a = torch.randn(2, 3)
print(a)
a_diag = torch.diag(a)
print(a_diag)
a_diag = torch.diag(a, diagonal=1)
print(a_diag)
a_diag = torch.diag(a, diagonal=-1)
print(a_diag)
a_diag = torch.diag(a, diagonal=0)
print(a_diag)
tensor([[ 0.9656, -0.5399,  1.4308],
        [ 0.2529,  0.9131, -1.4558]])
tensor([ 0.9656,  0.9131])
tensor([-0.5399, -1.4558])
tensor([ 0.2529])
tensor([ 0.9656,  0.9131])
# 3. Compute the histogram of the input tensor
a = torch.FloatTensor([1, 2, 1])
print(a)
a_t = torch.histc(a, bins=4, min=0, max=3)
print(a_t)
tensor([ 1.,  2.,  1.])
tensor([ 0.,  2.,  1.,  0.])
# 4. Return a tensor of normalized sub-tensors,
# such that each sub-tensor along dim has a p-norm no greater than maxnorm.
a = torch.ones(3,3)
a[1] = 2
a[2] = 3

a_renorm = torch.renorm(a, p=1, dim=0, maxnorm=5)
print(a_renorm)
tensor([[ 1.0000,  1.0000,  1.0000],
        [ 1.6667,  1.6667,  1.6667],
        [ 1.6667,  1.6667,  1.6667]])
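To see what renorm guarantees here, check the row-wise L1 norms: rows that exceeded maxnorm=5 were scaled down to exactly 5, and the first row (norm 3) was left untouched:
# Sketch: row-wise L1 norms after renorm
print(torch.norm(a_renorm, 1, 1))  # tensor([ 3.,  5.,  5.])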
# 5. Return the sum of the diagonal elements (the trace) of a 2-D matrix
a = torch.randn(3,3)
print(a)
a_trace = torch.trace(a)
print(a_trace)
tensor([[ 1.2111,  0.6471, -0.9622],
        [-0.5601,  0.8129,  0.0280],
        [-0.9791, -0.0244,  1.4609]])
tensor(3.4849)
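By definition the trace is the sum of the diagonal, so it can be cross-checked with diag:
# Sketch: the trace equals the sum of the diagonal elements
print(torch.diag(a).sum())  # matches a_trace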
# 6. Return a tensor out containing the lower-triangular part of the input matrix (2-D tensor); the rest of out is set to 0.
# The lower-triangular part consists of the elements on and below the specified diagonal.
# The diagonal argument controls which diagonal: diagonal = 0 is the main diagonal, diagonal > 0 is above it, diagonal < 0 is below it
a = torch.randn(3,3)
print(a)
a_tril = torch.tril(a)
print(a_tril)
a_tril = torch.tril(a,-1)
print(a_tril)
a_tril = torch.tril(a, 1)
print(a_tril)
tensor([[ 1.3586, -0.3350, -1.3682],
        [-1.0123,  0.1694, -0.3510],
        [ 0.5499,  1.3059,  0.0972]])
tensor([[ 1.3586,  0.0000,  0.0000],
        [-1.0123,  0.1694,  0.0000],
        [ 0.5499,  1.3059,  0.0972]])
tensor([[ 0.0000,  0.0000,  0.0000],
        [-1.0123,  0.0000,  0.0000],
        [ 0.5499,  1.3059,  0.0000]])
tensor([[ 1.3586, -0.3350,  0.0000],
        [-1.0123,  0.1694, -0.3510],
        [ 0.5499,  1.3059,  0.0972]])
# 7. Return a tensor containing the upper-triangular part of the input matrix (2-D tensor); the rest is set to 0.
# The upper-triangular part consists of the elements on and above the specified diagonal.
a = torch.randn(3,3)
print(a)
a_triu = torch.triu(a)
print(a_triu)
a_triu = torch.triu(a,-1)
print(a_triu)
a_triu = torch.triu(a,1)
print(a_triu)
tensor([[-0.8393,  1.9581, -1.3881],
        [-1.0005, -1.7253, -0.2855],
        [-0.7085, -0.6527, -1.3192]])
tensor([[-0.8393,  1.9581, -1.3881],
        [ 0.0000, -1.7253, -0.2855],
        [ 0.0000,  0.0000, -1.3192]])
tensor([[-0.8393,  1.9581, -1.3881],
        [-1.0005, -1.7253, -0.2855],
        [ 0.0000, -0.6527, -1.3192]])
tensor([[ 0.0000,  1.9581, -1.3881],
        [ 0.0000,  0.0000, -0.2855],
        [ 0.0000,  0.0000,  0.0000]])

5 Linear Algebra Operations

# 1. Batch matrix multiplication with a reduced add step
# res = (beta * M) + (alpha * sum(batch1_i @ batch2_i, i = 0..b))
a = torch.arange(1, 16).view(3,5)
print(a)
batch1 = torch.arange(1, 25).view(2,3,4)
batch2 = torch.arange(1, 41).view(2,4,5)
print(batch1)
print(batch2)
a_addbmm = torch.addbmm(2, a, batch1, batch2)  # legacy form: beta=2 passed positionally, alpha defaults to 1
print(a_addbmm)
tensor([[  1.,   2.,   3.,   4.,   5.],
        [  6.,   7.,   8.,   9.,  10.],
        [ 11.,  12.,  13.,  14.,  15.]])
tensor([[[  1.,   2.,   3.,   4.],
         [  5.,   6.,   7.,   8.],
         [  9.,  10.,  11.,  12.]],

        [[ 13.,  14.,  15.,  16.],
         [ 17.,  18.,  19.,  20.],
         [ 21.,  22.,  23.,  24.]]])
tensor([[[  1.,   2.,   3.,   4.,   5.],
         [  6.,   7.,   8.,   9.,  10.],
         [ 11.,  12.,  13.,  14.,  15.],
         [ 16.,  17.,  18.,  19.,  20.]],

        [[ 21.,  22.,  23.,  24.,  25.],
         [ 26.,  27.,  28.,  29.,  30.],
         [ 31.,  32.,  33.,  34.,  35.],
         [ 36.,  37.,  38.,  39.,  40.]]])
tensor([[ 1790.,  1860.,  1930.,  2000.,  2070.],
        [ 2392.,  2494.,  2596.,  2698.,  2800.],
        [ 2994.,  3128.,  3262.,  3396.,  3530.]])
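To make the formula concrete, the call above (beta=2, alpha=1) can be reproduced with bmm and a sum over the batch dimension:
# Sketch: manual addbmm with beta=2, alpha=1
manual = 2 * a + torch.bmm(batch1, batch2).sum(0)
print(manual)  # matches a_addbmm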
# 2. Matrix-multiply mat1 and mat2; the matrix mat is added to the final result.
# out = (beta * M) + (alpha * mat1 @ mat2)
a = torch.arange(1, 7).view(2, 3)
print(a)
batch1 = torch.arange(1, 7).view(2,3)
batch2 = torch.arange(1, 10).view(3,3)
print(batch1)
print(batch2)
a_addmm = torch.addmm(a, batch1, batch2)
print(a_addmm)
tensor([[ 1.,  2.,  3.],
        [ 4.,  5.,  6.]])
tensor([[ 1.,  2.,  3.],
        [ 4.,  5.,  6.]])
tensor([[ 1.,  2.,  3.],
        [ 4.,  5.,  6.],
        [ 7.,  8.,  9.]])
tensor([[  31.,   38.,   45.],
        [  70.,   86.,  102.]])
# 3. Multiply matrix mat by vector vec; the tensor is added to the result.
# out = (beta * tensor) + (alpha * (mat @ vec))
a = torch.randn(5)
mat = torch.randn(5, 3)
vec = torch.randn(3)
a_addmv = torch.addmv(a, mat, vec)
print(a_addmv)

tensor([-0.9355, -4.6457,  4.2649, -3.4753, -1.7256])
# 4. Tensor (outer) product of vectors vec1 and vec2, added to matrix M.
# out = (beta * M) + (alpha * vec1 ⊗ vec2)
vec1 = torch.arange(1, 4)
vec2 = torch.arange(1, 3)
M = torch.zeros(3, 2)
addr = torch.addr(M, vec1, vec2)
print(vec1, vec2,M)
print(addr)
tensor([ 1.,  2.,  3.]) tensor([ 1.,  2.]) tensor([[ 0.,  0.],
        [ 0.,  0.],
        [ 0.,  0.]])
tensor([[ 1.,  2.],
        [ 2.,  4.],
        [ 3.,  6.]])
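Since M is all zeros and beta = alpha = 1 here, addr reduces to the plain outer product, which can also be written with broadcasting:
# Sketch: outer product via broadcasting, matching addr above
manual = M + vec1.unsqueeze(1) * vec2.unsqueeze(0)
print(manual)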
# 5. Batch matrix multiplication of the matrices stored in batch1 and batch2, with M added per batch
# res_i = (beta * M_i) + (alpha * batch1_i @ batch2_i)
M = torch.randn(10, 3, 5)
batch1 = torch.randn(10, 3, 4)
batch2 = torch.randn(10, 4, 5)
a = torch.baddbmm(M, batch1, batch2)
print(a.size())
torch.Size([10, 3, 5])
# 6. Batch matrix multiplication of the matrices stored in batch1 and batch2
# res_i = batch1_i @ batch2_i
batch1 = torch.randn(10, 3, 4)
batch2 = torch.randn(10, 4, 5)
res = torch.bmm(batch1, batch2)
res.size()

torch.Size([10, 3, 5])
# 7. LU factorization and pivots
A = torch.randn(2, 3, 3)
A_LU = A.btrifact()
print(A_LU)
(tensor([[[ 2.0140,  2.0395, -1.2813],
         [ 0.3414, -0.7255, -0.4129],
         [-0.2357,  0.2872, -0.4254]],

        [[ 1.1897, -1.1071,  0.2687],
         [-0.1555, -2.4885,  0.7506],
         [ 0.6283,  0.4739, -0.3595]]]), tensor([[ 3,  3,  3],
        [ 3,  2,  3]], dtype=torch.int32))
# 8. Return the LU solution of the linear system Ax = b.
A = torch.randn(2, 3, 3)
b = torch.randn(2, 3)
A_LU = torch.btrifact(A)
x = b.btrisolve(*A_LU)
print(x)
tensor([[  6.1233,  -4.1202,  10.5028],
        [ -2.6603,   2.4344,  -7.1699]])
# 9. Dot product
a = torch.Tensor([2,3])
b = torch.Tensor([2,1])
c = torch.dot(a, b)
print(c)
tensor(7.)
# 10. Compute the eigenvalues and eigenvectors of the real square matrix a
a = torch.rand(4,4)
a_eig = torch.eig(a, True)
print(a_eig)
(tensor([[ 2.3699,  0.0000],
        [-0.0831,  0.0000],
        [-0.0061,  0.2991],
        [-0.0061, -0.2991]]), tensor([[-0.4830, -0.7665, -0.4320, -0.2137],
        [-0.4251,  0.0396, -0.1878, -0.0776],
        [-0.6945,  0.6407,  0.6590,  0.0000],
        [-0.3219, -0.0188, -0.1880,  0.5068]]))
# 11. Compute the least-squares and least-norm solutions for the full-rank m×n matrix A.
A = torch.Tensor([[1, 1, 1],
                  [2, 3, 4],
                  [3, 5, 2],
                  [4, 2, 5],
                  [5, 4, 3]])
B = torch.Tensor([[-10, -3],
                  [ 12, 14],
                  [ 14, 12],
                  [ 16, 16],
                  [ 18, 16]])
X, _ = torch.gels(B, A)
print(X)
tensor([[  2.0000,   1.0000],
        [  1.0000,   1.0000],
        [  1.0000,   2.0000],
        [ 10.9635,   4.8501],
        [  8.9332,   5.2418]])
# 12. Compute the QR factorization of the input
A = torch.Tensor([[1, 1, 1],
                  [2, 3, 4],
                  [3, 5, 2],
                  [4, 2, 5],
                  [5, 4, 3]])
a_geqrf = torch.geqrf(A)
print(a_geqrf)
(tensor([[-7.4162, -6.7420, -6.7420],
        [ 0.2376, -3.0896,  0.1471],
        [ 0.3565,  0.5272,  3.0861],
        [ 0.4753, -0.3952, -0.4312],
        [ 0.5941, -0.1411,  0.2681]]), tensor([ 1.1348,  1.3755,  1.5900]))
# 13. Compute the tensor (outer) product of the two vectors v1 and v2
v1 = torch.arange(1, 5)
v2 = torch.arange(1, 4)
a = torch.ger(v1, v2)
print(v1)
print(v2)
print(a)
tensor([ 1.,  2.,  3.,  4.])
tensor([ 1.,  2.,  3.])
tensor([[  1.,   2.,   3.],
        [  2.,   4.,   6.],
        [  3.,   6.,   9.],
        [  4.,   8.,  12.]])
# 14. X, LU = torch.gesv(B, A) returns the solution to the linear system AX = B.
A = torch.Tensor([[ 6.80, -2.11,  5.66,  5.97,  8.23],
                  [-6.05, -3.30,  5.36, -4.44,  1.08],
                  [-0.45,  2.58, -2.70,  0.27,  9.04],
                  [ 8.32,  2.71,  4.35, -7.17,  2.14],
                  [-9.67, -5.14, -7.26,  6.08, -6.87]]).t()
B = torch.Tensor([[ 4.02,  6.19, -8.22, -7.57, -3.03],
                  [-1.56,  4.00, -8.67,  1.75,  2.86],
                  [ 9.81, -4.09, -4.57, -8.61,  8.99]]).t()
X, LU = torch.gesv(B, A)
a = torch.dist(B, torch.mm(A, X))
print(a)
tensor(1.00000e-06 *
       7.0977)
# 15. Invert the square matrix input.
a = torch.rand(5, 5)
a_inv = torch.inverse(a)
print(a_inv)
tensor([[   96.7396,   -44.7571,    99.3288,   285.5245,  -425.9164],
        [ -405.2694,   185.9865,  -411.7601, -1183.8596,  1772.6039],
        [  197.1754,   -90.6226,   203.4398,   578.9345,  -868.8303],
        [  138.7625,   -63.9697,   139.4896,   400.6242,  -599.6282],
        [   74.1721,   -32.3945,    71.7191,   213.0582,  -318.0603]])
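A standard sanity check for an inverse: multiplying the original matrix by it should give the identity up to floating-point error:
# Sketch: a @ a_inv should be (numerically) the 5x5 identity
print(torch.mm(a, a_inv))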
# 16. Matrix-multiply mat1 and mat2
x = torch.rand(2,3)
y = torch.rand(3,2)
mm = torch.mm(x, y)
print(mm)
tensor([[ 1.0121,  0.7099],
        [ 0.3020,  0.1846]])
# 17. Multiply matrix mat by vector vec
x = torch.rand(2,3)
y = torch.rand(3)
mv = torch.mv(x, y)
print(mv)
tensor([ 0.9517,  0.2480])
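mv is the matrix-vector special case of mm; the equivalence can be sketched by viewing the vector as a single-column matrix:
# Sketch: mv as mm with the vector reshaped to a column
manual = torch.mm(x, y.unsqueeze(1)).squeeze(1)
print(manual)  # matches mv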
