PyTorch --- basic tensor operations

This article lists basic tensor operations, including creation, dimension transforms, and mathematical operations; comments have been added for the more involved operations.

#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: 1234
@file: GPU_grad.py
@time: 2020/06/16
@desc:
    Reformat code: Ctrl + Alt + L
    Run code: Ctrl + Shift + F10
    Comment / uncomment: Ctrl + /
"""
import torch
import time
import numpy as np


def GPU_running():
    print(torch.__version__)
    a = torch.randn(1000, 1000)
    b = torch.randn(1000, 2000)

    t0 = time.time()
    c = torch.matmul(a, b)
    t1 = time.time()
    print(a.device, t1 - t0, c.norm(1))

    device = torch.device('cuda')
    a = a.to(device)
    b = b.to(device)
    t0 = time.time()
    c = torch.matmul(a, b)
    t1 = time.time()
    print(a.device, t1 - t0, c.norm(1))

    # run the GPU matmul again: the first CUDA call above includes one-off
    # initialization overhead, so this second timing is more representative
    device = torch.device('cuda')
    a = a.to(device)
    b = b.to(device)
    t0 = time.time()
    c = torch.matmul(a, b)
    t1 = time.time()
    print(a.device, t1 - t0, c.norm(2))
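    # Note: CUDA kernels are launched asynchronously, so the wall-clock timings
    # above can understate the real kernel cost. A minimal sketch of more
    # accurate timing (assuming a CUDA device is available):
    #     torch.cuda.synchronize()
    #     t0 = time.time()
    #     c = torch.matmul(a, b)
    #     torch.cuda.synchronize()
    #     t1 = time.time()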


def torch_grad():
    x = torch.tensor(1.)
    a = torch.tensor(1.5, requires_grad=True)
    b = torch.tensor(2.2, requires_grad=True)
    c = torch.tensor(2., requires_grad=True)

    y = a ** 2 * x + b ** 2 * x + c

    print('before:grad[a]={},grad[b]={},grad[c]={}'.format(a.grad, b.grad, c.grad))
    grads = torch.autograd.grad(y, [a, b, c])  # take the partial derivatives of y with respect to a, b and c
    print('after:grad[a]={},grad[b]={},grad[c]={}'.format(grads[0], grads[1], grads[2]))
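    # For reference: y = a^2 * x + b^2 * x + c, so with x=1, a=1.5, b=2.2 the
    # expected gradients are dy/da = 2*a*x = 3.0, dy/db = 2*b*x = 4.4 and
    # dy/dc = 1.0, which is what autograd.grad returns here. An equivalent
    # sketch using backward() instead (gradients are then accumulated into .grad):
    #     y.backward()
    #     print(a.grad, b.grad, c.grad)  # tensor(3.), tensor(4.4000), tensor(1.)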


def tensor_numpy():
    # a=np.array([2,3.4])
    # b=torch.from_numpy(a)
    # print(b)
    # a=np.ones([2,3])
    # b=torch.from_numpy(a)
    # print(b)
    print(torch.tensor([2, 3]))  # lowercase torch.tensor takes data
    print(torch.Tensor(2, 3))  # uppercase torch.Tensor takes dimensions; rarely used
    print(torch.FloatTensor([2, 3]))  # passing a list makes the uppercase form take data as well
    # print(torch.FloatTensor(2,3))
    # print(torch.rand(2,2,3))
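    # torch.tensor infers the dtype from the data (int64 here), while
    # torch.Tensor / torch.FloatTensor always produce float32. A quick check:
    #     print(torch.tensor([2, 3]).dtype)       # torch.int64
    #     print(torch.FloatTensor([2, 3]).dtype)  # torch.float32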


def uninitialized():
    # reinforcement learning mostly uses double precision
    # a = torch.Tensor(1, 2)
    # print(a.type())
    # torch.set_default_tensor_type(torch.DoubleTensor)  # change the default tensor type
    # a = torch.Tensor(1, 2)
    # print(a.type())

    # a=torch.rand(2,2,3)
    # print(a)
    # print(torch.rand_like(a))  # the argument must be a tensor

    # print(torch.randn(1,2,3))  # standard normal distribution
    # print(torch.randint(1,10,[3,3]))  # a 3x3 tensor of integers in [1, 10)

    # print(torch.normal(mean=torch.full([10], 0.),
    #                    std=torch.arange(1, 0, -0.1)).reshape(2,5))  # sample from normal distributions

    # print(torch.full([2, 3], 7))
    # print(torch.full([], 1))  # produces a scalar
    # print(torch.full([2], 7))  # produces a vector with 2 elements

    # print(torch.arange(0,10))
    # print(torch.arange(0,10,2))
    # print(torch.range(0,10))  # deprecated, includes the endpoint; prefer torch.arange

    # print(torch.linspace(0,1,steps=4))
    # print(torch.logspace(1,2,3))  # the third positional argument is steps
    # print(torch.logspace(1, 2, steps=3))

    # print(torch.eye(3,4))
    # print(torch.zeros(3,4))

    # print(torch.randperm(10))  # a random permutation of 0..n-1
    # print(torch.rand(10,10)[torch.randperm(10)])  # index the rows with the permutation to shuffle them
    # print(torch.rand(10, 10))
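    # A common use of randperm is shuffling two tensors with the same order so
    # that samples and labels stay aligned (x and y are hypothetical tensors
    # whose first dimensions have the same size):
    #     idx = torch.randperm(x.shape[0])
    #     x, y = x[idx], y[idx]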

    # a = torch.rand(2, 3, 4, 5)
    # print(a[0, ...].shape == a[1, ...].shape)
    # print(a[:,:,0:25,0:26])
    # print(a.index_select(0,torch.tensor([1,2])).shape)  # the index argument must be a tensor
    # print(a[...].shape)
    # print(a[:,1,...].shape)
    # ... stands for all remaining dimensions; handy when their number is not known

    a = torch.tensor([[1, 2, 3],
                      [4, 5, 6]])
    # print(torch.take(a, torch.arange(0, 2, 4)))
    print(torch.take(a, torch.tensor([0, 2, 5])))
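    # take indexes the flattened tensor, so indices [0, 2, 5] select 1, 3 and 6 here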


def dimension_transform():
    a = torch.rand(4, 1, 28, 28)
    # print(a.view(4,28*28))  # merge channel, height and width (1*28*28) into one dimension, suitable for a fully connected layer
    # the batch dimension has to stay fixed when reshaping
    # print(a.unsqueeze(0).shape)
    # print(a.unsqueeze(-1).shape)  # valid insert positions range from -a.dim()-1 to a.dim(); prefer non-negative indices
    # b=torch.rand(32)
    # print(b.unsqueeze(1).unsqueeze(2).unsqueeze(0))
    # inserting a dimension before the current one wraps the whole tensor in an extra bracket
    # inserting a dimension after it wraps each value in its own bracket

    # print(a.squeeze().shape)
    # a = torch.rand(1, 32, 1, 1)
    # print(a.expand(2,32,2,2).shape)  # the number of dims must match; only size-1 dims can be expanded, and the data is not copied
    # print(a.expand(-1,-1,-1,4).shape)  # -1 keeps that dimension unchanged
    # print(a.repeat(2,2,2,2).shape)  # repeat takes the number of repetitions per dim, so the target size must be computed by hand; it copies memory

    # a = torch.rand(4, 3, 32, 32)
    # print(a.transpose(1, 3).shape)
    # b = a.transpose(1, 3).contiguous().view(4, 3 * 32 * 32).view(4, 32, 32, 3).transpose(1, 3)
    # swap the 2nd and 4th dims, make the memory contiguous, reshape with view, then swap the 2nd and 4th dims back
    # print(torch.all(torch.eq(a, b)))  # check that a and b are identical after the round trip, using element-wise torch.eq
    # print(a.t())  # matrix transpose (2-D tensors only)
    # print(a.permute(2, 1, 0, 3).shape)  # pass the target ordering of dimensions directly

    # broadcasting: size-1 dims are inserted in front and expanded to the target size
    # the aim is to apply a smaller tensor across a larger one without copying
    # shapes are matched starting from the last dimension
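    # A minimal broadcasting sketch for these rules, e.g. adding a per-channel
    # bias to a feature map (shapes here are illustrative):
    #     f = torch.rand(4, 32, 14, 14)
    #     bias = torch.rand(32, 1, 1)   # treated as (1, 32, 1, 1), expanded to (4, 32, 14, 14)
    #     print((f + bias).shape)       # torch.Size([4, 32, 14, 14])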

    # # merge and split
    # a = torch.rand(4, 32, 8)
    # b = torch.rand(5, 32, 8)
    # print(torch.cat([a, b], dim=0).shape)
    # # concatenate along dim 0
    # a = torch.rand(4, 32, 8)
    # b = torch.rand(4, 32, 8)
    # print(torch.cat([a, b], dim=1).shape)
    # # concatenate along dim 1; only the concatenated dim may differ in size, all other dims must match
    # print(torch.stack([a, b], dim=0).shape)
    # print(torch.stack([a, b], dim=1).shape)
    # stack inserts a new dimension at dim: index 0 along it selects a and index 1 selects b; all stacked tensors must have exactly the same shape
    # k, b, c, d = torch.split(a, 1, dim=0)
    # k, b, c, d = a.chunk(4, dim=0)
    # chunk takes the number of chunks: the size along the chosen dim divided by that number gives the size of each piece
    # print(k.shape, b.shape, c.shape, d.shape)
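    # split takes the size of each piece (1 along dim 0 above), while chunk takes
    # the number of pieces; both return a tuple of tensors, e.g.:
    #     parts = torch.split(torch.rand(4, 32, 8), 2, dim=0)  # two tensors of shape (2, 32, 8)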


def algorithm():
    # a = torch.rand(2, 4)
    # b = torch.rand(4)
    # add
    # print(torch.eq(a + b, torch.add(a, b)))
    # sub
    # print(torch.eq(a - b, torch.sub(a, b)))
    # mul: element-wise multiplication
    # div: element-wise division
    # matrix multiplication: mm only works on 2-D tensors; matmul and @ are recommended
    # a = torch.tensor([[1, 2],
    #                   [3, 4]])
    # b = torch.tensor([[2, 1],
    #                   [4, 4]])
    # print(torch.matmul(a,b))
    # print(a@b)

    # a = torch.rand(4, 3, 28, 64)
    # b = torch.rand(4, 3, 64, 16)  # for 4-D tensors only the last two dims are multiplied
    # print(torch.matmul(a,b).shape)  # the leading dims only need to satisfy broadcasting

    # basic linear-layer computation in a neural network
    # x.t() only applies to 2-D tensors; above 2-D use transpose
    # a = torch.rand(4, 784)
    # x = torch.rand(512, 784)
    # print((a @ x.t()).shape)
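    # This is the core of a fully connected layer: (4, 784) @ (784, 512) -> (4, 512).
    # An equivalent sketch using the functional API:
    #     print(torch.nn.functional.linear(a, x).shape)  # torch.Size([4, 512])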

    # # exponent operations
    # a = torch.full([2, 2], 3.)
    # print(a.pow(2))
    # print((a.pow(2)).sqrt())
    #
    # b = torch.exp(torch.ones(2, 2))
    # print(b)
    # print(torch.log(a))  # log uses base e by default
    #
    # # rounding / approximation
    # c = torch.tensor(3.14)
    # print(c.floor(), c.round(), c.trunc(), c.frac())  # floor, round to nearest, truncate, fractional part

    # clipping
    grad = torch.rand(2, 3) * 15
    # print(grad.max())
    # print(grad.median())
    # print(grad.clamp(min=10))  # clamp from below at 10
    # print(grad.clamp(0, 10))  # clamp to the range [0, 10]
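    # clamp is often used to clip gradients; a minimal sketch (w is a
    # hypothetical parameter tensor whose .grad has already been populated):
    #     w.grad = w.grad.clamp(-10, 10)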


def statistics():
    # # norm computes a vector/matrix norm (think of a regularization term); not to be confused with normalize
    # a = torch.full([8], 1.)
    # b = a.view(2, 4)
    # c = a.view(2, 2, 2)
    # print(a.norm(1), b.norm(2), c.norm(1))
    # print(b.norm(1, dim=1), b.norm(2, dim=0))

    # a = torch.arange(8).view(2, 4).float()
    # print(a)
    # print(a.min(), a.mean(), a.prod(),a.sum())
    # print(a.argmax(),a.argmin())

    # a = torch.randn(4,10)
    # print(a.argmin(dim=1),a.argmin(dim=0))  # return the indices of the minima along the given dim

    # a = torch.randn(4, 10)
    # print(a.max(dim=1))
    # print(a.max(dim=1, keepdim=True))  # keepdim keeps the number of dims; without it the result loses the reduced dim

    # a = torch.randn(4, 10)
    # print(a.topk(3, dim=1))  # the three largest per row, returned as values and indices
    # print(a.topk(3, dim=1, largest=False))  # the three smallest instead

    # a = torch.randn(4, 10)
    # print(a.kthvalue(8,dim=1))  # the k-th smallest along dim 1; with k=8 out of 10 this is the 3rd largest

    # print(a>0)
    # print(a.equal(torch.randn(4,10)))

    # advanced operations
    # where
    # condition = torch.tensor([[1, 2],
    #                           [3, 4]])
    # print(condition)
    # a = torch.tensor([[0., 0.],
    #                   [0., 0.]])
    # b = torch.tensor([[1., 1.],
    #                   [1., 1.]])
    # print(torch.where(condition > 2, a, b))
    # condition is checked element-wise: wherever it is > 2 the output takes the
    # value from a at that position, otherwise the value from b

    # gather
    prob = torch.randn(4, 10)
    idx = prob.topk(3, dim=1)
    idx = idx[1]  # the second item returned by topk, i.e. the indices
    print(idx)
    label = torch.arange(10) + 100  # shift the label values by 100
    long = idx.long()
    print(long)
    expanded = label.expand(4, 10)
    print(expanded)
    print(torch.gather(expanded, dim=1, index=long))
    # along dim 1, pick the entries of expanded at the positions given by long
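    # For dim=1, gather follows out[i][j] = input[i][index[i][j]], so each row of
    # the result holds the relabelled classes (100..109) at that row's top-3
    # index positions.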


# [b,c,h,w]
if __name__ == '__main__':
    statistics()

Reposted from blog.csdn.net/soulproficiency/article/details/106892794