Pythonの基本構文 - データ連結(拼接)操作(numpy & torch)

1. 配列の連結

torch(PyTorch)

1. torch.cat

import torch
a = torch.ones(10)
b = torch.zeros(10)
a,b
(tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]),
 tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]))
c = torch.cat((a,b),0)
d = torch.cat((a,b),-1)
c,d
(tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.,
         0., 0.]),
 tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.,
         0., 0.]))
m = torch.rand(2,3)
n = torch.rand(2,3)
m,n
(tensor([[0.4305, 0.5858, 0.6041],
         [0.3689, 0.7339, 0.3004]]),
 tensor([[0.3952, 0.5579, 0.6647],
         [0.3820, 0.1805, 0.4863]]))
s1 = torch.cat((m,n),0) #dim = 0 按列拼接
s1
tensor([[0.4305, 0.5858, 0.6041],
        [0.3689, 0.7339, 0.3004],
        [0.3952, 0.5579, 0.6647],
        [0.3820, 0.1805, 0.4863]])
s2 = torch.cat((m,n),dim=1) # dim=1, 按行拼接
s2
tensor([[0.4305, 0.5858, 0.6041, 0.3952, 0.5579, 0.6647],
        [0.3689, 0.7339, 0.3004, 0.3820, 0.1805, 0.4863]])
s3 = torch.cat((s2,m),dim=1)
s3
tensor([[0.4305, 0.5858, 0.6041, 0.3952, 0.5579, 0.6647, 0.4305, 0.5858, 0.6041],
        [0.3689, 0.7339, 0.3004, 0.3820, 0.1805, 0.4863, 0.3689, 0.7339, 0.3004]])

分析例:
a は形状 [3] のテンソルであり、ループのたびに a の値が変化する。これらの変化する値を 1 つのテンソルにつなぎ合わせたい

a_all = []
for i in range(5):
    a = torch.rand(3)
    if i==0: a_all = a
    else: a_all = torch.cat((a_all,a),dim=0)
a_all
tensor([0.2426, 0.6465, 0.6805, 0.3972, 0.0939, 0.9206, 0.3735, 0.6619, 0.4411,
        0.6900, 0.4391, 0.4374, 0.4946, 0.9330, 0.0059])
a.shape
torch.Size([3])
a.reshape(-1,1)
a.shape
torch.Size([3])
(注: reshape はインプレース操作ではなく新しいテンソルを返すため、戻り値を代入しない限り a.shape は変わりません)

**注:** 空の Python リスト(`[]`)はテンソルではないため、torch.cat で連結することはできません

p = []
q = torch.rand(2,3)
p,q
([],
 tensor([[0.7906, 0.3794, 0.5541],
         [0.7933, 0.8305, 0.9623]]))
torch.cat((p,q),dim=0)
---------------------------------------------------------------------------

TypeError                                 Traceback (most recent call last)

<ipython-input-24-01adc47454b2> in <module>
----> 1 torch.cat((p,q),dim=0)


TypeError: expected Tensor as element 0 in argument 0, but got list

2. torch.stack は、同じ形状のテンソル同士を「新しい次元」に沿って積み重ねる(スタックする)ために使用されます

s1 = torch.rand(2,3)
s1
tensor([[0.6762, 0.5598, 0.9276],
        [0.8154, 0.1286, 0.4169]])
s2 = torch.rand(2,3)
s2
tensor([[0.8296, 0.8844, 0.4930],
        [0.6506, 0.5582, 0.9268]])
torch.stack((s1,s2),dim=0)
tensor([[[0.6762, 0.5598, 0.9276],
         [0.8154, 0.1286, 0.4169]],

        [[0.8296, 0.8844, 0.4930],
         [0.6506, 0.5582, 0.9268]]])
torch.stack((s1,s2),dim=1)
tensor([[[0.6762, 0.5598, 0.9276],
         [0.8296, 0.8844, 0.4930]],

        [[0.8154, 0.1286, 0.4169],
         [0.6506, 0.5582, 0.9268]]])
w1 = torch.ones(10)
w2 = torch.zeros(10)
w1,w2
(tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]),
 tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]))
torch.stack((w1,w2),dim=0)
tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
        [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
torch.stack((w1,w2),dim=1)
tensor([[1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.]])
t1 = torch.stack((w1,w2),dim=1)
t1
tensor([[1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.]])
for t in t1:
    a,b = t.tolist()
    print(a,b)
1.0 0.0
1.0 0.0
1.0 0.0
1.0 0.0
1.0 0.0
1.0 0.0
1.0 0.0
1.0 0.0
1.0 0.0
1.0 0.0

numpy の連結

1. np.concatenate()

import numpy as np
l1 = [32]
l2 = l1 * 5
l3 = np.concatenate((l1, l2), axis=0)
l3
array([32, 32, 32, 32, 32, 32])

2. np.vstack()は、配列を垂直方向(行順)にスタックして、新しい配列を形成します

a = np.random.rand(5)
b = np.random.rand(5)
print(a)
print(b)
[0.77971325 0.00322273 0.46507714 0.68736038 0.63308873]
[0.39370746 0.95878067 0.55683926 0.51739888 0.02944197]
c = np.vstack((a,b))
c
array([[0.77971325, 0.00322273, 0.46507714, 0.68736038, 0.63308873],
       [0.39370746, 0.95878067, 0.55683926, 0.51739888, 0.02944197]])

3. np.hstack()は、配列を水平方向(列順)にスタックして、新しい配列を形成します

d = np.hstack((a,b))
d
array([0.77971325, 0.00322273, 0.46507714, 0.68736038, 0.63308873,
       0.39370746, 0.95878067, 0.55683926, 0.51739888, 0.02944197])
d2 = np.concatenate((a,b), axis=-1)
d2
array([0.77971325, 0.00322273, 0.46507714, 0.68736038, 0.63308873,
       0.39370746, 0.95878067, 0.55683926, 0.51739888, 0.02944197])
d == d2
array([ True,  True,  True,  True,  True,  True,  True,  True,  True,
        True])

2、配列演算

1. torch.sum

ss = torch.randn(10,5)
ss
tensor([[-0.3580, -0.3733,  0.9526, -0.2283,  1.9288],
        [ 1.3071, -1.1372, -0.5722, -1.2409, -0.0080],
        [ 0.5829, -1.0200, -0.1185, -1.2181,  0.0603],
        [-0.6280,  0.9144, -0.0768,  0.0363, -0.4285],
        [-0.7456,  0.4093, -1.8571,  0.1461,  1.0394],
        [ 1.6432, -1.3011, -0.8470, -0.7020,  0.1546],
        [ 0.9789, -0.3996, -0.1452,  0.3209,  0.6135],
        [ 0.2313,  0.7755,  0.8359, -1.2404,  0.1416],
        [ 0.1149,  0.5541, -0.5762,  0.4889, -0.5913],
        [-0.4622,  0.3965,  1.5930, -0.3442,  1.0517]])
torch.sum(ss,dim=0) #按列求和
tensor([ 2.6644, -1.1814, -0.8113, -3.9817,  3.9621])
torch.sum(ss,dim=1) #按行求和
tensor([ 1.9218, -1.6512, -1.7134, -0.1827, -1.0079, -1.0523,  1.3685,  0.7440,
        -0.0095,  2.2348])

2. 例: 2つの配列において、同じ行(同じ位置)で同時に条件を満たす要素の個数を求めます

h1 = torch.randn(20,5)
h2 = torch.rand(20,5)
h1,h2
(tensor([[ 1.0787e-01,  3.6212e-01, -6.7001e-01, -1.3113e-01, -5.6334e-01],
         [ 3.7117e-01, -2.7837e-01,  5.3334e-01, -1.2462e-01, -4.6961e-01],
         [ 1.0620e-01,  1.0143e+00, -1.1995e-01, -1.5577e+00,  2.1638e+00],
         [-1.4423e+00,  6.7262e-01,  5.6922e-01, -2.6069e+00, -3.1958e-02],
         [ 4.6898e-01, -3.3921e-01,  9.3832e-01,  1.8161e-01,  1.4255e-03],
         [-4.4099e-01, -1.1822e+00,  4.9722e-01, -1.9459e+00, -2.0278e+00],
         [-4.1827e-01, -1.6319e+00,  4.0082e-01, -8.6108e-01,  8.6031e-01],
         [ 5.1789e-03, -1.0438e+00,  1.4234e+00, -8.3692e-01, -8.9327e-01],
         [ 1.1142e+00,  2.2890e+00,  9.1615e-03,  4.9670e-01, -4.2941e-01],
         [-4.6739e-01, -1.0803e+00, -6.6007e-01,  1.0975e+00, -1.6250e+00],
         [ 8.8541e-01,  6.2591e-01,  1.4147e+00, -1.0127e+00,  1.9216e-01],
         [ 1.2020e+00, -1.8767e-01, -1.0581e-01,  4.7640e-02,  5.6826e-01],
         [-5.0824e-01,  7.2207e-01,  1.1469e-01, -3.0354e-01, -5.7193e-01],
         [-1.6786e+00, -2.0825e-01,  7.5341e-02, -1.5638e+00,  9.2302e-01],
         [ 1.2906e+00,  1.2738e+00, -4.2465e-01,  2.9686e-01, -1.8713e+00],
         [ 7.4944e-02,  1.6607e-01,  1.0455e+00, -5.7186e-01,  7.7246e-01],
         [-1.3172e-01, -3.7788e-01, -5.4512e-01, -3.2986e-01,  1.2026e+00],
         [-2.6471e-01, -4.2183e-01,  9.9185e-01,  8.2221e-01, -9.7715e-01],
         [-1.3714e-01, -5.1784e-01, -1.0706e-01,  2.4261e+00,  1.1298e+00],
         [ 2.7270e-01,  7.3282e-01,  1.5422e-01, -4.9846e-01,  1.2631e+00]]),
 tensor([[0.0064, 0.2773, 0.6650, 0.3728, 0.1938],
         [0.2022, 0.4849, 0.1597, 0.2846, 0.5781],
         [0.5958, 0.6348, 0.5120, 0.5045, 0.3732],
         [0.7840, 0.0808, 0.4068, 0.5470, 0.7440],
         [0.7738, 0.2075, 0.9645, 0.8400, 0.1133],
         [0.3799, 0.3262, 0.0764, 0.6290, 0.0100],
         [0.0138, 0.2095, 0.9954, 0.1325, 0.6177],
         [0.8204, 0.2459, 0.5248, 0.9117, 0.1878],
         [0.2975, 0.3896, 0.3241, 0.1220, 0.2994],
         [0.9046, 0.1916, 0.7281, 0.7476, 0.5030],
         [0.5313, 0.1954, 0.4869, 0.7355, 0.8207],
         [0.4611, 0.4371, 0.9435, 0.3310, 0.5528],
         [0.9957, 0.9361, 0.3405, 0.5885, 0.7228],
         [0.5734, 0.7717, 0.9719, 0.0430, 0.7723],
         [0.4081, 0.0176, 0.9468, 0.2677, 0.7340],
         [0.3430, 0.3015, 0.2948, 0.9503, 0.9144],
         [0.7708, 0.7227, 0.2422, 0.0042, 0.2820],
         [0.2663, 0.0134, 0.5405, 0.6676, 0.9037],
         [0.3350, 0.2978, 0.5059, 0.0910, 0.7983],
         [0.3290, 0.3677, 0.7292, 0.0921, 0.6261]]))
mask1 = h1[:,0] > 0
mask1
tensor([ True,  True,  True, False,  True, False, False,  True,  True, False,
         True,  True, False, False,  True,  True, False, False, False,  True])
mask2 = h2[:,0] > 0.5
mask2
tensor([False, False,  True,  True,  True, False, False,  True, False,  True,
         True, False,  True,  True, False, False,  True, False, False, False])
torch.sum(mask1)
tensor(11)
torch.sum(mask2)
tensor(9)
torch.sum(mask1 & mask2) #两个数组共同满足条件的索引值个数为4
tensor(4)

3. 出力の位置揃え

  • 左寄せ: str.ljust(10, "-")
  • 右寄せ: str.rjust(10, "-")
  • 中央寄せ: str.center(10, "-")

(注: 以下の例では "%s".ljust(10,"*") のように、長さ 2 のフォーマット文字列 "%s" 自体を 10 桁にパディングしてから % で置換しているため、単語の長さに関係なく常に 8 個の * が付きます。単語自体を 10 桁に揃えたい場合は classes[i].ljust(10,"*") と書きます)
classes = ['airplane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
classes[0]
'airplane'
for i in range(10):
    print("%s".ljust(10,"*")%classes[i])
airplane********
car********
bird********
cat********
deer********
dog********
frog********
horse********
ship********
truck********
for i in range(10):
    print("%s".rjust(10,"*")%classes[i])
********airplane
********car
********bird
********cat
********deer
********dog
********frog
********horse
********ship
********truck
for i in range(10):
    print("%s".center(10,"*")%classes[i])
****airplane****
****car****
****bird****
****cat****
****deer****
****dog****
****frog****
****horse****
****ship****
****truck****
s1 = 'long long long .'
s2 = 'short.'
 
print ('%-30s%-20s' %(s1,s2)) #'%-30s' 含义是 左对齐,且占用30个字符位 
print ('%-30s%-20s' %(s2,s1))
long long long .              short.              
short.                        long long long .    
s1 = 'long long long .'
s2 = 'short.'
print ('{:>30}{:>20}' .format(s1,s2)) #{:30d}含义是 右对齐,且占用30个字符位 
print ('{:<30}{:<20}' .format(s1,s2)) #{:<30d}含义是 左对齐,且占用30个字符位 
print ('{:^30}{:^20}' .format(s1,s2)) #{:<30d}含义是 左对齐,且占用30个字符位 
              long long long .              short.
long long long .              short.              
       long long long .              short.       

おすすめ

転載: blog.csdn.net/qq_44783177/article/details/114235959